forked from etc/pineapple-src
early-access version 2253
This commit is contained in:
parent 34d1231736
commit bd6cfd7191
65 changed files with 122278 additions and 20111 deletions
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 2252.
+This is the source code for early-access 2253.

 ## Legal Notice

22 externals/Vulkan-Headers/README.md vendored
@@ -2,17 +2,6 @@

 Vulkan header files and API registry

-## Default branch changed to 'main' 2021-09-12
-
-As discussed in #222, the default branch of this repository is now 'main'. This change should be largely transparent to repository users, since github rewrites many references to the old 'master' branch to 'main'. However, if you have a checked-out local clone, you may wish to take the following steps as recommended by github:
-
-```sh
-git branch -m master main
-git fetch origin
-git branch -u origin/main main
-git remote set-head origin -a
-```
-
 ## Repository Content

 The contents of this repository are largely obtained from other repositories
@@ -46,23 +35,14 @@ Files in this repository originate from:
 * README.md
 * cmake/Copyright_cmake.txt
 * cmake/cmake_uninstall.cmake.in
-* Non-API headers (report issues to the [Vulkan-Loader/issues](https://github.com/KhronosGroup/Vulkan-Loader/issues) tracker)
+* Non-API headers (report issues against @lenny-lunarg)
 * include/vulkan/vk_icd.h
 * include/vulkan/vk_layer.h
 * include/vulkan/vk_sdk_platform.h

 ### Vulkan C++ Binding Repository (https://github.com/KhronosGroup/Vulkan-Hpp)

-As of the Vulkan-Docs 1.2.182 spec update, the Vulkan-Hpp headers have been
-split into multiple files. All of those files are now included in this
-repository.
-
 * include/vulkan/vulkan.hpp
-* include/vulkan/vulkan_enums.hpp
-* include/vulkan/vulkan_funcs.hpp
-* include/vulkan/vulkan_handles.hpp
-* include/vulkan/vulkan_raii.hpp
-* include/vulkan/vulkan_structs.hpp

 ## Version Tagging Scheme

@@ -14,182 +14,170 @@ extern "C" {
 #include "vk_video/vulkan_video_codecs_common.h"

 // Vulkan 0.9 provisional Vulkan video H.264 encode and decode std specification version number
-#define VK_STD_VULKAN_VIDEO_CODEC_H264_API_VERSION_0_9_5 VK_MAKE_VIDEO_STD_VERSION(0, 9, 5) // Patch version should always be set to 0
+#define VK_STD_VULKAN_VIDEO_CODEC_H264_API_VERSION_0_9 VK_MAKE_VIDEO_STD_VERSION(0, 9, 0) // Patch version should always be set to 0

 // Format must be in the form XX.XX where the first two digits are the major and the second two, the minor.
-#define VK_STD_VULKAN_VIDEO_CODEC_H264_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H264_API_VERSION_0_9_5
+#define VK_STD_VULKAN_VIDEO_CODEC_H264_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H264_API_VERSION_0_9
 #define VK_STD_VULKAN_VIDEO_CODEC_H264_EXTENSION_NAME "VK_STD_vulkan_video_codec_h264"

 // *************************************************
 // Video H.264 common definitions:
 // *************************************************

-#define STD_VIDEO_H264_CPB_CNT_LIST_SIZE 32
-#define STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS 6
-#define STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS 16
-#define STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS 2
-#define STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS 64
-
 typedef enum StdVideoH264ChromaFormatIdc {
-    STD_VIDEO_H264_CHROMA_FORMAT_IDC_MONOCHROME = 0,
+    std_video_h264_chroma_format_idc_monochrome = 0,
-    STD_VIDEO_H264_CHROMA_FORMAT_IDC_420 = 1,
+    std_video_h264_chroma_format_idc_420 = 1,
-    STD_VIDEO_H264_CHROMA_FORMAT_IDC_422 = 2,
+    std_video_h264_chroma_format_idc_422 = 2,
-    STD_VIDEO_H264_CHROMA_FORMAT_IDC_444 = 3,
+    std_video_h264_chroma_format_idc_444 = 3,
-    STD_VIDEO_H264_CHROMA_FORMAT_IDC_INVALID = 0x7FFFFFFF
 } StdVideoH264ChromaFormatIdc;

 typedef enum StdVideoH264ProfileIdc {
-    STD_VIDEO_H264_PROFILE_IDC_BASELINE = 66, /* Only constrained baseline is supported */
+    std_video_h264_profile_idc_baseline = 66, /* Only constrained baseline is supported */
-    STD_VIDEO_H264_PROFILE_IDC_MAIN = 77,
+    std_video_h264_profile_idc_main = 77,
-    STD_VIDEO_H264_PROFILE_IDC_HIGH = 100,
+    std_video_h264_profile_idc_high = 100,
-    STD_VIDEO_H264_PROFILE_IDC_HIGH_444_PREDICTIVE = 244,
+    std_video_h264_profile_idc_high_444_predictive = 244,
-    STD_VIDEO_H264_PROFILE_IDC_INVALID = 0x7FFFFFFF
+    std_video_h264_profile_idc_invalid = 0x7FFFFFFF
 } StdVideoH264ProfileIdc;

 typedef enum StdVideoH264Level {
-    STD_VIDEO_H264_LEVEL_1_0 = 0,
+    std_video_h264_level_1_0 = 0,
-    STD_VIDEO_H264_LEVEL_1_1 = 1,
+    std_video_h264_level_1_1 = 1,
-    STD_VIDEO_H264_LEVEL_1_2 = 2,
+    std_video_h264_level_1_2 = 2,
-    STD_VIDEO_H264_LEVEL_1_3 = 3,
+    std_video_h264_level_1_3 = 3,
-    STD_VIDEO_H264_LEVEL_2_0 = 4,
+    std_video_h264_level_2_0 = 4,
-    STD_VIDEO_H264_LEVEL_2_1 = 5,
+    std_video_h264_level_2_1 = 5,
-    STD_VIDEO_H264_LEVEL_2_2 = 6,
+    std_video_h264_level_2_2 = 6,
-    STD_VIDEO_H264_LEVEL_3_0 = 7,
+    std_video_h264_level_3_0 = 7,
-    STD_VIDEO_H264_LEVEL_3_1 = 8,
+    std_video_h264_level_3_1 = 8,
-    STD_VIDEO_H264_LEVEL_3_2 = 9,
+    std_video_h264_level_3_2 = 9,
-    STD_VIDEO_H264_LEVEL_4_0 = 10,
+    std_video_h264_level_4_0 = 10,
-    STD_VIDEO_H264_LEVEL_4_1 = 11,
+    std_video_h264_level_4_1 = 11,
-    STD_VIDEO_H264_LEVEL_4_2 = 12,
+    std_video_h264_level_4_2 = 12,
-    STD_VIDEO_H264_LEVEL_5_0 = 13,
+    std_video_h264_level_5_0 = 13,
-    STD_VIDEO_H264_LEVEL_5_1 = 14,
+    std_video_h264_level_5_1 = 14,
-    STD_VIDEO_H264_LEVEL_5_2 = 15,
+    std_video_h264_level_5_2 = 15,
-    STD_VIDEO_H264_LEVEL_6_0 = 16,
+    std_video_h264_level_6_0 = 16,
-    STD_VIDEO_H264_LEVEL_6_1 = 17,
+    std_video_h264_level_6_1 = 17,
-    STD_VIDEO_H264_LEVEL_6_2 = 18,
+    std_video_h264_level_6_2 = 18,
-    STD_VIDEO_H264_LEVEL_INVALID = 0x7FFFFFFF
+    std_video_h264_level_invalid = 0x7FFFFFFF
 } StdVideoH264Level;

 typedef enum StdVideoH264PocType {
-    STD_VIDEO_H264_POC_TYPE_0 = 0,
+    std_video_h264_poc_type_0 = 0,
-    STD_VIDEO_H264_POC_TYPE_1 = 1,
+    std_video_h264_poc_type_1 = 1,
-    STD_VIDEO_H264_POC_TYPE_2 = 2,
+    std_video_h264_poc_type_2 = 2,
-    STD_VIDEO_H264_POC_TYPE_INVALID = 0x7FFFFFFF
+    std_video_h264_poc_type_invalid = 0x7FFFFFFF
 } StdVideoH264PocType;

 typedef enum StdVideoH264AspectRatioIdc {
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_UNSPECIFIED = 0,
+    std_video_h264_aspect_ratio_idc_unspecified = 0,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_SQUARE = 1,
+    std_video_h264_aspect_ratio_idc_square = 1,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_12_11 = 2,
+    std_video_h264_aspect_ratio_idc_12_11 = 2,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_10_11 = 3,
+    std_video_h264_aspect_ratio_idc_10_11 = 3,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_16_11 = 4,
+    std_video_h264_aspect_ratio_idc_16_11 = 4,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_40_33 = 5,
+    std_video_h264_aspect_ratio_idc_40_33 = 5,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_24_11 = 6,
+    std_video_h264_aspect_ratio_idc_24_11 = 6,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_20_11 = 7,
+    std_video_h264_aspect_ratio_idc_20_11 = 7,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_32_11 = 8,
+    std_video_h264_aspect_ratio_idc_32_11 = 8,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_80_33 = 9,
+    std_video_h264_aspect_ratio_idc_80_33 = 9,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_18_11 = 10,
+    std_video_h264_aspect_ratio_idc_18_11 = 10,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_15_11 = 11,
+    std_video_h264_aspect_ratio_idc_15_11 = 11,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_64_33 = 12,
+    std_video_h264_aspect_ratio_idc_64_33 = 12,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_160_99 = 13,
+    std_video_h264_aspect_ratio_idc_160_99 = 13,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_4_3 = 14,
+    std_video_h264_aspect_ratio_idc_4_3 = 14,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_3_2 = 15,
+    std_video_h264_aspect_ratio_idc_3_2 = 15,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_2_1 = 16,
+    std_video_h264_aspect_ratio_idc_2_1 = 16,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_EXTENDED_SAR = 255,
+    std_video_h264_aspect_ratio_idc_extended_sar = 255,
-    STD_VIDEO_H264_ASPECT_RATIO_IDC_INVALID = 0x7FFFFFFF
+    std_video_h264_aspect_ratio_idc_invalid = 0x7FFFFFFF
 } StdVideoH264AspectRatioIdc;

-typedef enum StdVideoH264WeightedBipredIdc {
+typedef enum StdVideoH264WeightedBiPredIdc {
-    STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_DEFAULT = 0,
+    std_video_h264_default_weighted_b_slices_prediction_idc = 0,
-    STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_EXPLICIT = 1,
+    std_video_h264_explicit_weighted_b_slices_prediction_idc = 1,
-    STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_IMPLICIT = 2,
+    std_video_h264_implicit_weighted_b_slices_prediction_idc = 2,
-    STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_INVALID = 0x7FFFFFFF
+    std_video_h264_invalid_weighted_b_slices_prediction_idc = 0x7FFFFFFF
-} StdVideoH264WeightedBipredIdc;
+} StdVideoH264WeightedBiPredIdc;

 typedef enum StdVideoH264ModificationOfPicNumsIdc {
-    STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_SHORT_TERM_SUBTRACT = 0,
+    std_video_h264_modification_of_pic_nums_idc_short_term_subtract = 0,
-    STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_SHORT_TERM_ADD = 1,
+    std_video_h264_modification_of_pic_nums_idc_short_term_add = 1,
-    STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_LONG_TERM = 2,
+    std_video_h264_modification_of_pic_nums_idc_long_term = 2,
-    STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_END = 3,
+    std_video_h264_modification_of_pic_nums_idc_end = 3,
-    STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_INVALID = 0x7FFFFFFF
+    std_video_h264_modification_of_pic_nums_idc_invalid = 0x7FFFFFFF
 } StdVideoH264ModificationOfPicNumsIdc;

 typedef enum StdVideoH264MemMgmtControlOp {
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_END = 0,
+    std_video_h264_mem_mgmt_control_op_end = 0,
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_SHORT_TERM = 1,
+    std_video_h264_mem_mgmt_control_op_unmark_short_term = 1,
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_LONG_TERM = 2,
+    std_video_h264_mem_mgmt_control_op_unmark_long_term = 2,
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MARK_LONG_TERM = 3,
+    std_video_h264_mem_mgmt_control_op_mark_long_term = 3,
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_SET_MAX_LONG_TERM_INDEX = 4,
+    std_video_h264_mem_mgmt_control_op_set_max_long_term_index = 4,
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_ALL = 5,
+    std_video_h264_mem_mgmt_control_op_unmark_all = 5,
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MARK_CURRENT_AS_LONG_TERM = 6,
+    std_video_h264_mem_mgmt_control_op_mark_current_as_long_term = 6,
-    STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_INVALID = 0x7FFFFFFF
+    std_video_h264_mem_mgmt_control_op_invalid = 0x7FFFFFFF
 } StdVideoH264MemMgmtControlOp;

 typedef enum StdVideoH264CabacInitIdc {
-    STD_VIDEO_H264_CABAC_INIT_IDC_0 = 0,
+    std_video_h264_cabac_init_idc_0 = 0,
-    STD_VIDEO_H264_CABAC_INIT_IDC_1 = 1,
+    std_video_h264_cabac_init_idc_1 = 1,
-    STD_VIDEO_H264_CABAC_INIT_IDC_2 = 2,
+    std_video_h264_cabac_init_idc_2 = 2,
-    STD_VIDEO_H264_CABAC_INIT_IDC_INVALID = 0x7FFFFFFF
+    std_video_h264_cabac_init_idc_invalid = 0x7FFFFFFF
 } StdVideoH264CabacInitIdc;

 typedef enum StdVideoH264DisableDeblockingFilterIdc {
-    STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_DISABLED = 0,
+    std_video_h264_disable_deblocking_filter_idc_disabled = 0,
-    STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_ENABLED = 1,
+    std_video_h264_disable_deblocking_filter_idc_enabled = 1,
-    STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_PARTIAL = 2,
+    std_video_h264_disable_deblocking_filter_idc_partial = 2,
-    STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_INVALID = 0x7FFFFFFF
+    std_video_h264_disable_deblocking_filter_idc_invalid = 0x7FFFFFFF
 } StdVideoH264DisableDeblockingFilterIdc;

-typedef enum StdVideoH264SliceType {
-    STD_VIDEO_H264_SLICE_TYPE_P = 0,
-    STD_VIDEO_H264_SLICE_TYPE_B = 1,
-    STD_VIDEO_H264_SLICE_TYPE_I = 2,
-    // reserved STD_VIDEO_H264_SLICE_TYPE_SP = 3,
-    // reserved STD_VIDEO_H264_SLICE_TYPE_SI = 4,
-    STD_VIDEO_H264_SLICE_TYPE_INVALID = 0x7FFFFFFF
-} StdVideoH264SliceType;
-
 typedef enum StdVideoH264PictureType {
-    STD_VIDEO_H264_PICTURE_TYPE_P = 0,
+    std_video_h264_picture_type_i = 0,
-    STD_VIDEO_H264_PICTURE_TYPE_B = 1,
+    std_video_h264_picture_type_p = 1,
-    STD_VIDEO_H264_PICTURE_TYPE_I = 2,
+    std_video_h264_picture_type_b = 2,
-    // reserved STD_VIDEO_H264_PICTURE_TYPE_SP = 3,
+    std_video_h264_picture_type_invalid = 0x7FFFFFFF
-    // reserved STD_VIDEO_H264_PICTURE_TYPE_SI = 4,
-    STD_VIDEO_H264_PICTURE_TYPE_IDR = 5,
-    STD_VIDEO_H264_PICTURE_TYPE_INVALID = 0x7FFFFFFF
 } StdVideoH264PictureType;

+typedef enum StdVideoH264SliceType {
+    std_video_h264_slice_type_i = 0,
+    std_video_h264_slice_type_p = 1,
+    std_video_h264_slice_type_b = 2,
+    std_video_h264_slice_type_invalid = 0x7FFFFFFF
+} StdVideoH264SliceType;

 typedef enum StdVideoH264NonVclNaluType {
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_SPS = 0,
+    std_video_h264_non_vcl_nalu_type_sps = 0,
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_PPS = 1,
+    std_video_h264_non_vcl_nalu_type_pps = 1,
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_AUD = 2,
+    std_video_h264_non_vcl_nalu_type_aud = 2,
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_PREFIX = 3,
+    std_video_h264_non_vcl_nalu_type_prefix = 3,
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_END_OF_SEQUENCE = 4,
+    std_video_h264_non_vcl_nalu_type_end_of_sequence = 4,
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_END_OF_STREAM = 5,
+    std_video_h264_non_vcl_nalu_type_end_of_stream = 5,
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_PRECODED = 6,
+    std_video_h264_non_vcl_nalu_type_precoded = 6,
-    STD_VIDEO_H264_NON_VCL_NALU_TYPE_INVALID = 0x7FFFFFFF
+    std_video_h264_non_vcl_nalu_type_invalid = 0x7FFFFFFF
 } StdVideoH264NonVclNaluType;

 typedef struct StdVideoH264SpsVuiFlags {
-    uint32_t aspect_ratio_info_present_flag : 1;
+    uint32_t aspect_ratio_info_present_flag:1;
-    uint32_t overscan_info_present_flag : 1;
+    uint32_t overscan_info_present_flag:1;
-    uint32_t overscan_appropriate_flag : 1;
+    uint32_t overscan_appropriate_flag:1;
-    uint32_t video_signal_type_present_flag : 1;
+    uint32_t video_signal_type_present_flag:1;
-    uint32_t video_full_range_flag : 1;
+    uint32_t video_full_range_flag:1;
-    uint32_t color_description_present_flag : 1;
+    uint32_t color_description_present_flag:1;
-    uint32_t chroma_loc_info_present_flag : 1;
+    uint32_t chroma_loc_info_present_flag:1;
-    uint32_t timing_info_present_flag : 1;
+    uint32_t timing_info_present_flag:1;
-    uint32_t fixed_frame_rate_flag : 1;
+    uint32_t fixed_frame_rate_flag:1;
-    uint32_t bitstream_restriction_flag : 1;
+    uint32_t bitstream_restriction_flag:1;
-    uint32_t nal_hrd_parameters_present_flag : 1;
+    uint32_t nal_hrd_parameters_present_flag:1;
-    uint32_t vcl_hrd_parameters_present_flag : 1;
+    uint32_t vcl_hrd_parameters_present_flag:1;
 } StdVideoH264SpsVuiFlags;

-typedef struct StdVideoH264HrdParameters { // hrd_parameters
+typedef struct StdVideoH264HrdParameters {
     uint8_t cpb_cnt_minus1;
     uint8_t bit_rate_scale;
     uint8_t cpb_size_scale;
-    uint32_t bit_rate_value_minus1[STD_VIDEO_H264_CPB_CNT_LIST_SIZE]; // cpb_cnt_minus1 number of valid elements
+    uint32_t bit_rate_value_minus1[32];
-    uint32_t cpb_size_value_minus1[STD_VIDEO_H264_CPB_CNT_LIST_SIZE]; // cpb_cnt_minus1 number of valid elements
+    uint32_t cpb_size_value_minus1[32];
-    uint8_t cbr_flag[STD_VIDEO_H264_CPB_CNT_LIST_SIZE]; // cpb_cnt_minus1 number of valid elements
+    uint8_t cbr_flag[32];
     uint32_t initial_cpb_removal_delay_length_minus1;
     uint32_t cpb_removal_delay_length_minus1;
     uint32_t dpb_output_delay_length_minus1;
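The two H.264 version defines in this hunk differ only in the value packed by VK_MAKE_VIDEO_STD_VERSION. As a self-contained sketch (the local MAKE_VIDEO_STD_VERSION macro and the 10/10/12-bit major/minor/patch split below mirror what vk_video/vulkan_video_codecs_common.h is expected to define; verify against the header revision actually vendored):

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for VK_MAKE_VIDEO_STD_VERSION so this compiles without the Vulkan headers. */
#define MAKE_VIDEO_STD_VERSION(major, minor, patch) \
    ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch)))

int main(void) {
    uint32_t v_old = MAKE_VIDEO_STD_VERSION(0, 9, 5); /* 0.9.5, the value on the removed side */
    uint32_t v_new = MAKE_VIDEO_STD_VERSION(0, 9, 0); /* 0.9.0, the value on the added side */
    printf("0.9.5 packs to 0x%08x, 0.9.0 packs to 0x%08x\n", (unsigned)v_old, (unsigned)v_new);
    /* Unpack again: major in bits 22..31, minor in bits 12..21, patch in bits 0..11. */
    printf("major=%u minor=%u patch=%u\n",
           (unsigned)(v_old >> 22), (unsigned)((v_old >> 12) & 0x3FFu), (unsigned)(v_old & 0xFFFu));
    return 0;
}
```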
@@ -206,29 +194,30 @@ typedef struct StdVideoH264SequenceParameterSetVui {
     uint8_t matrix_coefficients;
     uint32_t num_units_in_tick;
     uint32_t time_scale;
-    StdVideoH264HrdParameters* pHrdParameters; // must be a valid ptr to hrd_parameters, if nal_hrd_parameters_present_flag or vcl_hrd_parameters_present_flag are set
+    StdVideoH264HrdParameters hrd_parameters;
-    uint8_t max_num_reorder_frames;
+    uint8_t num_reorder_frames;
     uint8_t max_dec_frame_buffering;
     StdVideoH264SpsVuiFlags flags;
 } StdVideoH264SequenceParameterSetVui;

 typedef struct StdVideoH264SpsFlags {
-    uint32_t constraint_set0_flag : 1;
+    uint32_t constraint_set0_flag:1;
-    uint32_t constraint_set1_flag : 1;
+    uint32_t constraint_set1_flag:1;
-    uint32_t constraint_set2_flag : 1;
+    uint32_t constraint_set2_flag:1;
-    uint32_t constraint_set3_flag : 1;
+    uint32_t constraint_set3_flag:1;
-    uint32_t constraint_set4_flag : 1;
+    uint32_t constraint_set4_flag:1;
-    uint32_t constraint_set5_flag : 1;
+    uint32_t constraint_set5_flag:1;
-    uint32_t direct_8x8_inference_flag : 1;
+    uint32_t direct_8x8_inference_flag:1;
-    uint32_t mb_adaptive_frame_field_flag : 1;
+    uint32_t mb_adaptive_frame_field_flag:1;
-    uint32_t frame_mbs_only_flag : 1;
+    uint32_t frame_mbs_only_flag:1;
-    uint32_t delta_pic_order_always_zero_flag : 1;
+    uint32_t delta_pic_order_always_zero_flag:1;
-    uint32_t separate_colour_plane_flag : 1;
+    uint32_t residual_colour_transform_flag:1;
-    uint32_t gaps_in_frame_num_value_allowed_flag : 1;
+    uint32_t gaps_in_frame_num_value_allowed_flag:1;
-    uint32_t qpprime_y_zero_transform_bypass_flag : 1;
+    uint32_t first_picture_after_seek_flag:1; // where is this being documented?
-    uint32_t frame_cropping_flag : 1;
+    uint32_t qpprime_y_zero_transform_bypass_flag:1;
-    uint32_t seq_scaling_matrix_present_flag : 1;
+    uint32_t frame_cropping_flag:1;
-    uint32_t vui_parameters_present_flag : 1;
+    uint32_t scaling_matrix_present_flag:1;
+    uint32_t vui_parameters_present_flag:1;
 } StdVideoH264SpsFlags;

 typedef struct StdVideoH264ScalingLists
@@ -245,8 +234,8 @@ typedef struct StdVideoH264ScalingLists
     // bit 0 - 5 are for each entry of ScalingList4x4
     // bit 6 - 7 are for each entry plus 6 for ScalingList8x8
     uint8_t use_default_scaling_matrix_mask;
-    uint8_t ScalingList4x4[STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS][STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS];
+    uint8_t ScalingList4x4[6][16];
-    uint8_t ScalingList8x8[STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS][STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS];
+    uint8_t ScalingList8x8[2][64];
 } StdVideoH264ScalingLists;

 typedef struct StdVideoH264SequenceParameterSet
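The use_default_scaling_matrix_mask convention spelled out in the comments above (bits 0 to 5 for the six 4x4 lists, bits 6 and 7 for the two 8x8 lists) can be read with plain shifts. A minimal sketch, using a trimmed stand-in struct rather than the real StdVideoH264ScalingLists:

```c
#include <stdbool.h>
#include <stdint.h>

/* Stand-in with only the fields relevant to the bitmask convention. */
typedef struct DemoH264ScalingLists {
    uint8_t use_default_scaling_matrix_mask;
    uint8_t ScalingList4x4[6][16];
    uint8_t ScalingList8x8[2][64];
} DemoH264ScalingLists;

/* Bit i (0..5) says whether 4x4 list i uses the default scaling matrix. */
static bool uses_default_4x4(const DemoH264ScalingLists *lists, int list_index) {
    return (lists->use_default_scaling_matrix_mask >> list_index) & 1u;
}

/* Bits 6..7 cover the two 8x8 lists, i.e. bit (6 + list_index). */
static bool uses_default_8x8(const DemoH264ScalingLists *lists, int list_index) {
    return (lists->use_default_scaling_matrix_mask >> (6 + list_index)) & 1u;
}
```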
@@ -271,23 +260,21 @@ typedef struct StdVideoH264SequenceParameterSet
     uint32_t frame_crop_top_offset;
     uint32_t frame_crop_bottom_offset;
     StdVideoH264SpsFlags flags;
-    // pOffsetForRefFrame is a pointer representing the offset_for_ref_frame array with num_ref_frames_in_pic_order_cnt_cycle number of elements
+    int32_t offset_for_ref_frame[255]; // The number of valid values are defined by the num_ref_frames_in_pic_order_cnt_cycle
-    // If pOffsetForRefFrame has nullptr value, then num_ref_frames_in_pic_order_cnt_cycle must also be "0".
+    StdVideoH264ScalingLists* pScalingLists; // Must be a valid pointer if scaling_matrix_present_flag is set
-    int32_t* pOffsetForRefFrame;
-    StdVideoH264ScalingLists* pScalingLists; // Must be a valid pointer if seq_scaling_matrix_present_flag is set
     StdVideoH264SequenceParameterSetVui* pSequenceParameterSetVui; // Must be a valid pointer if StdVideoH264SpsFlags:vui_parameters_present_flag is set
 } StdVideoH264SequenceParameterSet;

 typedef struct StdVideoH264PpsFlags {
-    uint32_t transform_8x8_mode_flag : 1;
+    uint32_t transform_8x8_mode_flag:1;
-    uint32_t redundant_pic_cnt_present_flag : 1;
+    uint32_t redundant_pic_cnt_present_flag:1;
-    uint32_t constrained_intra_pred_flag : 1;
+    uint32_t constrained_intra_pred_flag:1;
-    uint32_t deblocking_filter_control_present_flag : 1;
+    uint32_t deblocking_filter_control_present_flag:1;
-    uint32_t weighted_bipred_idc_flag : 1;
+    uint32_t weighted_bipred_idc_flag:1;
-    uint32_t weighted_pred_flag : 1;
+    uint32_t weighted_pred_flag:1;
-    uint32_t pic_order_present_flag : 1;
+    uint32_t pic_order_present_flag:1;
-    uint32_t entropy_coding_mode_flag : 1;
+    uint32_t entropy_coding_mode_flag:1;
-    uint32_t pic_scaling_matrix_present_flag : 1;
+    uint32_t scaling_matrix_present_flag:1;
 } StdVideoH264PpsFlags;

 typedef struct StdVideoH264PictureParameterSet
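In the SPS layout on the added side of this hunk, offset_for_ref_frame becomes a fixed 255-entry array of which only the first num_ref_frames_in_pic_order_cnt_cycle entries are meaningful; the removed side carried the same data behind pOffsetForRefFrame. A hedged sketch of filling it, using a stand-in struct that holds just the two fields involved (num_ref_frames_in_pic_order_cnt_cycle is named only in the header comment, so its exact declaration is assumed):

```c
#include <stdint.h>
#include <string.h>

typedef struct DemoH264Sps {
    uint8_t num_ref_frames_in_pic_order_cnt_cycle; /* assumed 0..255 per the H.264 syntax */
    int32_t offset_for_ref_frame[255];
} DemoH264Sps;

/* Copy the parsed offsets and record how many entries are valid; unused entries stay zero. */
static void demo_set_poc_cycle(DemoH264Sps *sps, const int32_t *offsets, uint8_t count) {
    memset(sps->offset_for_ref_frame, 0, sizeof(sps->offset_for_ref_frame));
    memcpy(sps->offset_for_ref_frame, offsets, (size_t)count * sizeof(int32_t));
    sps->num_ref_frames_in_pic_order_cnt_cycle = count;
}
```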
@@ -296,13 +283,13 @@ typedef struct StdVideoH264PictureParameterSet
     uint8_t pic_parameter_set_id;
     uint8_t num_ref_idx_l0_default_active_minus1;
     uint8_t num_ref_idx_l1_default_active_minus1;
-    StdVideoH264WeightedBipredIdc weighted_bipred_idc;
+    StdVideoH264WeightedBiPredIdc weighted_bipred_idc;
     int8_t pic_init_qp_minus26;
     int8_t pic_init_qs_minus26;
     int8_t chroma_qp_index_offset;
     int8_t second_chroma_qp_index_offset;
     StdVideoH264PpsFlags flags;
-    StdVideoH264ScalingLists* pScalingLists; // Must be a valid pointer if StdVideoH264PpsFlags::pic_scaling_matrix_present_flag is set.
+    StdVideoH264ScalingLists* pScalingLists; // Must be a valid pointer if StdVideoH264PpsFlags::scaling_matrix_present_flag is set.
 } StdVideoH264PictureParameterSet;

 #ifdef __cplusplus
@@ -17,22 +17,12 @@ extern "C" {
 // Video H.264 Decode related parameters:
 // *************************************************

-#define STD_VIDEO_DECODE_H264_MVC_REF_LIST_SIZE 15
-
-typedef enum StdVideoDecodeH264FieldOrderCount {
-    STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_TOP = 0,
-    STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_BOTTOM = 1,
-    STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE = 2,
-    STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_INVALID = 0x7FFFFFFF
-} StdVideoDecodeH264FieldOrderCnt;
-
 typedef struct StdVideoDecodeH264PictureInfoFlags {
-    uint32_t field_pic_flag : 1; // Is field picture
+    uint32_t field_pic_flag:1; // Is field picture
-    uint32_t is_intra : 1; // Is intra picture
+    uint32_t is_intra:1; // Is intra picture
-    uint32_t IdrPicFlag : 1; // instantaneous decoding refresh (IDR) picture
+    uint32_t bottom_field_flag:1; // bottom (true) or top (false) field if field_pic_flag is set.
-    uint32_t bottom_field_flag : 1; // bottom (true) or top (false) field if field_pic_flag is set.
+    uint32_t is_reference:1; // This only applies to picture info, and not to the DPB lists.
-    uint32_t is_reference : 1; // This only applies to picture info, and not to the DPB lists.
+    uint32_t complementary_field_pair:1; // complementary field pair, complementary non-reference field pair, complementary reference field pair
-    uint32_t complementary_field_pair : 1; // complementary field pair, complementary non-reference field pair, complementary reference field pair
 } StdVideoDecodeH264PictureInfoFlags;

 typedef struct StdVideoDecodeH264PictureInfo {
@@ -42,15 +32,15 @@ typedef struct StdVideoDecodeH264PictureInfo {
     uint16_t frame_num; // 7.4.3 Slice header semantics
     uint16_t idr_pic_id; // 7.4.3 Slice header semantics
     // PicOrderCnt is based on TopFieldOrderCnt and BottomFieldOrderCnt. See 8.2.1 Decoding process for picture order count type 0 - 2
-    int32_t PicOrderCnt[STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE]; // TopFieldOrderCnt and BottomFieldOrderCnt fields.
+    int32_t PicOrderCnt[2]; // TopFieldOrderCnt and BottomFieldOrderCnt fields.
     StdVideoDecodeH264PictureInfoFlags flags;
 } StdVideoDecodeH264PictureInfo;

 typedef struct StdVideoDecodeH264ReferenceInfoFlags {
-    uint32_t top_field_flag : 1; // Reference is used for top field reference.
+    uint32_t top_field_flag:1; // Reference is used for top field reference.
-    uint32_t bottom_field_flag : 1; // Reference is used for bottom field reference.
+    uint32_t bottom_field_flag:1; // Reference is used for bottom field reference.
-    uint32_t is_long_term : 1; // this is a long term reference
+    uint32_t is_long_term:1; // this is a long term reference
-    uint32_t is_non_existing : 1; // Must be handled in accordance with 8.2.5.2: Decoding process for gaps in frame_num
+    uint32_t is_non_existing:1; // Must be handled in accordance with 8.2.5.2: Decoding process for gaps in frame_num
 } StdVideoDecodeH264ReferenceInfoFlags;

 typedef struct StdVideoDecodeH264ReferenceInfo {
@@ -62,9 +52,9 @@ typedef struct StdVideoDecodeH264ReferenceInfo {
 } StdVideoDecodeH264ReferenceInfo;

 typedef struct StdVideoDecodeH264MvcElementFlags {
-    uint32_t non_idr : 1;
+    uint32_t non_idr:1;
-    uint32_t anchor_pic : 1;
+    uint32_t anchor_pic:1;
-    uint32_t inter_view : 1;
+    uint32_t inter_view:1;
 } StdVideoDecodeH264MvcElementFlags;

 typedef struct StdVideoDecodeH264MvcElement {
@@ -74,13 +64,13 @@ typedef struct StdVideoDecodeH264MvcElement {
     uint16_t temporalId; // move out?
     uint16_t priorityId; // move out?
     uint16_t numOfAnchorRefsInL0;
-    uint16_t viewIdOfAnchorRefsInL0[STD_VIDEO_DECODE_H264_MVC_REF_LIST_SIZE];
+    uint16_t viewIdOfAnchorRefsInL0[15];
     uint16_t numOfAnchorRefsInL1;
-    uint16_t viewIdOfAnchorRefsInL1[STD_VIDEO_DECODE_H264_MVC_REF_LIST_SIZE];
+    uint16_t viewIdOfAnchorRefsInL1[15];
     uint16_t numOfNonAnchorRefsInL0;
-    uint16_t viewIdOfNonAnchorRefsInL0[STD_VIDEO_DECODE_H264_MVC_REF_LIST_SIZE];
+    uint16_t viewIdOfNonAnchorRefsInL0[15];
     uint16_t numOfNonAnchorRefsInL1;
-    uint16_t viewIdOfNonAnchorRefsInL1[STD_VIDEO_DECODE_H264_MVC_REF_LIST_SIZE];
+    uint16_t viewIdOfNonAnchorRefsInL1[15];
 } StdVideoDecodeH264MvcElement;

 typedef struct StdVideoDecodeH264Mvc {
@@ -18,24 +18,24 @@ extern "C" {
 // *************************************************

 typedef struct StdVideoEncodeH264SliceHeaderFlags {
-    uint32_t idr_flag : 1;
+    uint32_t idr_flag:1;
-    uint32_t is_reference_flag : 1;
+    uint32_t is_reference_flag:1;
-    uint32_t num_ref_idx_active_override_flag : 1;
+    uint32_t num_ref_idx_active_override_flag:1;
-    uint32_t no_output_of_prior_pics_flag : 1;
+    uint32_t no_output_of_prior_pics_flag:1;
-    uint32_t long_term_reference_flag : 1;
+    uint32_t long_term_reference_flag:1;
-    uint32_t adaptive_ref_pic_marking_mode_flag : 1;
+    uint32_t adaptive_ref_pic_marking_mode_flag:1;
-    uint32_t no_prior_references_available_flag : 1;
+    uint32_t no_prior_references_available_flag:1;
 } StdVideoEncodeH264SliceHeaderFlags;

 typedef struct StdVideoEncodeH264PictureInfoFlags {
-    uint32_t idr_flag : 1;
+    uint32_t idr_flag:1;
-    uint32_t is_reference_flag : 1;
+    uint32_t is_reference_flag:1;
-    uint32_t long_term_reference_flag : 1;
+    uint32_t long_term_reference_flag:1;
 } StdVideoEncodeH264PictureInfoFlags;

 typedef struct StdVideoEncodeH264RefMgmtFlags {
-    uint32_t ref_pic_list_modification_l0_flag : 1;
+    uint32_t ref_pic_list_modification_l0_flag:1;
-    uint32_t ref_pic_list_modification_l1_flag : 1;
+    uint32_t ref_pic_list_modification_l1_flag:1;
 } StdVideoEncodeH264RefMgmtFlags;

 typedef struct StdVideoEncodeH264RefListModEntry {
@@ -14,89 +14,58 @@ extern "C" {
 #include "vk_video/vulkan_video_codecs_common.h"

 // Vulkan 0.5 version number WIP
-#define VK_STD_VULKAN_VIDEO_CODEC_H265_API_VERSION_0_9_5 VK_MAKE_VIDEO_STD_VERSION(0, 9, 5) // Patch version should always be set to 0
+#define VK_STD_VULKAN_VIDEO_CODEC_H265_API_VERSION_0_5 VK_MAKE_VIDEO_STD_VERSION(0, 5, 0) // Patch version should always be set to 0

 // Format must be in the form XX.XX where the first two digits are the major and the second two, the minor.
-#define VK_STD_VULKAN_VIDEO_CODEC_H265_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H265_API_VERSION_0_9_5
+#define VK_STD_VULKAN_VIDEO_CODEC_H265_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H265_API_VERSION_0_5
 #define VK_STD_VULKAN_VIDEO_CODEC_H265_EXTENSION_NAME "VK_STD_vulkan_video_codec_h265"

-#define STD_VIDEO_H265_CPB_CNT_LIST_SIZE 32
-#define STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE 7
-#define STD_VIDEO_H265_SCALING_LIST_4X4_NUM_LISTS 6
-#define STD_VIDEO_H265_SCALING_LIST_4X4_NUM_ELEMENTS 16
-#define STD_VIDEO_H265_SCALING_LIST_8X8_NUM_LISTS 6
-#define STD_VIDEO_H265_SCALING_LIST_8X8_NUM_ELEMENTS 64
-#define STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS 6
-#define STD_VIDEO_H265_SCALING_LIST_16X16_NUM_ELEMENTS 64
-#define STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS 2
-#define STD_VIDEO_H265_SCALING_LIST_32X32_NUM_ELEMENTS 64
-#define STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE 6
-#define STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_COLS_LIST_SIZE 19
-#define STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_ROWS_LIST_SIZE 21
-#define STD_VIDEO_H265_PREDICTOR_PALETTE_COMPONENTS_LIST_SIZE 3
-#define STD_VIDEO_H265_PREDICTOR_PALETTE_COMP_ENTRIES_LIST_SIZE 128
-
 typedef enum StdVideoH265ChromaFormatIdc {
-    STD_VIDEO_H265_CHROMA_FORMAT_IDC_MONOCHROME = 0,
+    std_video_h265_chroma_format_idc_monochrome = 0,
-    STD_VIDEO_H265_CHROMA_FORMAT_IDC_420 = 1,
+    std_video_h265_chroma_format_idc_420 = 1,
-    STD_VIDEO_H265_CHROMA_FORMAT_IDC_422 = 2,
+    std_video_h265_chroma_format_idc_422 = 2,
-    STD_VIDEO_H265_CHROMA_FORMAT_IDC_444 = 3,
+    std_video_h265_chroma_format_idc_444 = 3,
-    STD_VIDEO_H265_CHROMA_FORMAT_IDC_INVALID = 0x7FFFFFFF
 } StdVideoH265ChromaFormatIdc;

 typedef enum StdVideoH265ProfileIdc {
-    STD_VIDEO_H265_PROFILE_IDC_MAIN = 1,
+    std_video_h265_profile_idc_main = 1,
-    STD_VIDEO_H265_PROFILE_IDC_MAIN_10 = 2,
+    std_video_h265_profile_idc_main_10 = 2,
-    STD_VIDEO_H265_PROFILE_IDC_MAIN_STILL_PICTURE = 3,
+    std_video_h265_profile_idc_main_still_picture = 3,
-    STD_VIDEO_H265_PROFILE_IDC_FORMAT_RANGE_EXTENSIONS = 4,
+    std_video_h265_profile_idc_format_range_extensions = 4,
-    STD_VIDEO_H265_PROFILE_IDC_SCC_EXTENSIONS = 9,
+    std_video_h265_profile_idc_scc_extensions = 9,
-    STD_VIDEO_H265_PROFILE_IDC_INVALID = 0x7FFFFFFF
+    std_video_h265_profile_idc_invalid = 0x7FFFFFFF
 } StdVideoH265ProfileIdc;

 typedef enum StdVideoH265Level {
-    STD_VIDEO_H265_LEVEL_1_0 = 0,
+    std_video_h265_level_1_0 = 0,
-    STD_VIDEO_H265_LEVEL_2_0 = 1,
+    std_video_h265_level_2_0 = 1,
-    STD_VIDEO_H265_LEVEL_2_1 = 2,
+    std_video_h265_level_2_1 = 2,
-    STD_VIDEO_H265_LEVEL_3_0 = 3,
+    std_video_h265_level_3_0 = 3,
-    STD_VIDEO_H265_LEVEL_3_1 = 4,
+    std_video_h265_level_3_1 = 4,
-    STD_VIDEO_H265_LEVEL_4_0 = 5,
+    std_video_h265_level_4_0 = 5,
-    STD_VIDEO_H265_LEVEL_4_1 = 6,
+    std_video_h265_level_4_1 = 6,
-    STD_VIDEO_H265_LEVEL_5_0 = 7,
+    std_video_h265_level_5_0 = 7,
-    STD_VIDEO_H265_LEVEL_5_1 = 8,
+    std_video_h265_level_5_1 = 8,
-    STD_VIDEO_H265_LEVEL_5_2 = 9,
+    std_video_h265_level_5_2 = 9,
-    STD_VIDEO_H265_LEVEL_6_0 = 10,
+    std_video_h265_level_6_0 = 10,
-    STD_VIDEO_H265_LEVEL_6_1 = 11,
+    std_video_h265_level_6_1 = 11,
-    STD_VIDEO_H265_LEVEL_6_2 = 12,
+    std_video_h265_level_6_2 = 12,
-    STD_VIDEO_H265_LEVEL_INVALID = 0x7FFFFFFF
+    std_video_h265_level_invalid = 0x7FFFFFFF
 } StdVideoH265Level;

-typedef enum StdVideoH265SliceType {
-    STD_VIDEO_H265_SLICE_TYPE_B = 0,
-    STD_VIDEO_H265_SLICE_TYPE_P = 1,
-    STD_VIDEO_H265_SLICE_TYPE_I = 2,
-    STD_VIDEO_H265_SLICE_TYPE_INVALID = 0x7FFFFFFF
-} StdVideoH265SliceType;
-
-typedef enum StdVideoH265PictureType {
-    STD_VIDEO_H265_PICTURE_TYPE_P = 0,
-    STD_VIDEO_H265_PICTURE_TYPE_B = 1,
-    STD_VIDEO_H265_PICTURE_TYPE_I = 2,
-    STD_VIDEO_H265_PICTURE_TYPE_IDR = 3,
-    STD_VIDEO_H265_PICTURE_TYPE_INVALID = 0x7FFFFFFF
-} StdVideoH265PictureType;
-
 typedef struct StdVideoH265DecPicBufMgr
 {
-    uint32_t max_latency_increase_plus1[STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE];
+    uint32_t max_latency_increase_plus1[7];
-    uint8_t max_dec_pic_buffering_minus1[STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE];
+    uint8_t max_dec_pic_buffering_minus1[7];
-    uint8_t max_num_reorder_pics[STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE];
+    uint8_t max_num_reorder_pics[7];
 } StdVideoH265DecPicBufMgr;

-typedef struct StdVideoH265SubLayerHrdParameters { // sub_layer_hrd_parameters
+typedef struct StdVideoH265SubLayerHrdParameters {
-    uint32_t bit_rate_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
+    uint32_t bit_rate_value_minus1[32];
-    uint32_t cpb_size_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
+    uint32_t cpb_size_value_minus1[32];
-    uint32_t cpb_size_du_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
+    uint32_t cpb_size_du_value_minus1[32];
-    uint32_t bit_rate_du_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE];
+    uint32_t bit_rate_du_value_minus1[32];
     uint32_t cbr_flag; // each bit represents a range of CpbCounts (bit 0 - cpb_cnt_minus1) per sub-layer
 } StdVideoH265SubLayerHrdParameters;

@@ -105,9 +74,9 @@ typedef struct StdVideoH265HrdFlags {
     uint32_t vcl_hrd_parameters_present_flag : 1;
     uint32_t sub_pic_hrd_params_present_flag : 1;
     uint32_t sub_pic_cpb_params_in_pic_timing_sei_flag : 1;
-    uint32_t fixed_pic_rate_general_flag : 8; // each bit represents a sublayer, bit 0 - vps_max_sub_layers_minus1
+    uint8_t fixed_pic_rate_general_flag; // each bit represents a sublayer, bit 0 - vps_max_sub_layers_minus1
-    uint32_t fixed_pic_rate_within_cvs_flag : 8; // each bit represents a sublayer, bit 0 - vps_max_sub_layers_minus1
+    uint8_t fixed_pic_rate_within_cvs_flag; // each bit represents a sublayer, bit 0 - vps_max_sub_layers_minus1
-    uint32_t low_delay_hrd_flag : 8; // each bit represents a sublayer, bit 0 - vps_max_sub_layers_minus1
+    uint8_t low_delay_hrd_flag; // each bit represents a sublayer, bit 0 - vps_max_sub_layers_minus1
 } StdVideoH265HrdFlags;

 typedef struct StdVideoH265HrdParameters {
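Both sides of this hunk keep the same convention for the three HRD rate flags: bit i describes sublayer i, for i up to vps_max_sub_layers_minus1; only the storage changes from an 8-bit bitfield to a plain uint8_t. A small sketch of the bit access, written against the plain-byte form and independent of either declaration style:

```c
#include <stdbool.h>
#include <stdint.h>

/* Mark sublayer 'sublayer' as having a fixed picture rate; valid sublayers are 0..6 for H.265. */
static void set_fixed_pic_rate(uint8_t *flag_bits, unsigned sublayer) {
    *flag_bits |= (uint8_t)(1u << sublayer);
}

/* Read the per-sublayer bit back out of the packed byte. */
static bool get_fixed_pic_rate(uint8_t flag_bits, unsigned sublayer) {
    return (flag_bits >> sublayer) & 1u;
}
```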
@@ -120,10 +89,10 @@ typedef struct StdVideoH265HrdParameters {
     uint8_t initial_cpb_removal_delay_length_minus1;
     uint8_t au_cpb_removal_delay_length_minus1;
     uint8_t dpb_output_delay_length_minus1;
-    uint8_t cpb_cnt_minus1[STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE];
+    uint8_t cpb_cnt_minus1[7];
-    uint16_t elemental_duration_in_tc_minus1[STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE];
+    uint16_t elemental_duration_in_tc_minus1[7];
-    StdVideoH265SubLayerHrdParameters* pSubLayerHrdParametersNal[STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE]; // NAL per layer ptr to sub_layer_hrd_parameters
+    StdVideoH265SubLayerHrdParameters* SubLayerHrdParametersNal[7];
-    StdVideoH265SubLayerHrdParameters* pSubLayerHrdParametersVcl[STD_VIDEO_H265_SUBLAYERS_MINUS1_LIST_SIZE]; // VCL per layer ptr to sub_layer_hrd_parameters
+    StdVideoH265SubLayerHrdParameters* SubLayerHrdParametersVcl[7];
     StdVideoH265HrdFlags flags;
 } StdVideoH265HrdParameters;

@@ -142,18 +111,18 @@ typedef struct StdVideoH265VideoParameterSet
     uint32_t vps_time_scale;
     uint32_t vps_num_ticks_poc_diff_one_minus1;
     StdVideoH265DecPicBufMgr* pDecPicBufMgr;
-    StdVideoH265HrdParameters* pHrdParameters;
+    StdVideoH265HrdParameters* hrd_parameters;
     StdVideoH265VpsFlags flags;
 } StdVideoH265VideoParameterSet;

 typedef struct StdVideoH265ScalingLists
 {
-    uint8_t ScalingList4x4[STD_VIDEO_H265_SCALING_LIST_4X4_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_4X4_NUM_ELEMENTS]; // ScalingList[ 0 ][ MatrixID ][ i ] (sizeID = 0)
+    uint8_t ScalingList4x4[6][16]; // ScalingList[ 0 ][ MatrixID ][ i ] (sizeID = 0)
-    uint8_t ScalingList8x8[STD_VIDEO_H265_SCALING_LIST_8X8_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_8X8_NUM_ELEMENTS]; // ScalingList[ 1 ][ MatrixID ][ i ] (sizeID = 1)
+    uint8_t ScalingList8x8[6][64]; // ScalingList[ 1 ][ MatrixID ][ i ] (sizeID = 1)
-    uint8_t ScalingList16x16[STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_16X16_NUM_ELEMENTS]; // ScalingList[ 2 ][ MatrixID ][ i ] (sizeID = 2)
+    uint8_t ScalingList16x16[6][64]; // ScalingList[ 2 ][ MatrixID ][ i ] (sizeID = 2)
-    uint8_t ScalingList32x32[STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_32X32_NUM_ELEMENTS]; // ScalingList[ 3 ][ MatrixID ][ i ] (sizeID = 3)
+    uint8_t ScalingList32x32[2][64]; // ScalingList[ 3 ][ MatrixID ][ i ] (sizeID = 3)
-    uint8_t ScalingListDCCoef16x16[STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS]; // scaling_list_dc_coef_minus8[ sizeID - 2 ][ matrixID ] + 8, sizeID = 2
+    uint8_t ScalingListDCCoef16x16[6]; // scaling_list_dc_coef_minus8[ sizeID - 2 ][ matrixID ] + 8, sizeID = 2
-    uint8_t ScalingListDCCoef32x32[STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS]; // scaling_list_dc_coef_minus8[ sizeID - 2 ][ matrixID ] + 8. sizeID = 3
+    uint8_t ScalingListDCCoef32x32[2]; // scaling_list_dc_coef_minus8[ sizeID - 2 ][ matrixID ] + 8. sizeID = 3
 } StdVideoH265ScalingLists;

 typedef struct StdVideoH265SpsVuiFlags {
@@ -194,7 +163,7 @@ typedef struct StdVideoH265SequenceParameterSetVui {
     uint32_t vui_num_units_in_tick;
     uint32_t vui_time_scale;
     uint32_t vui_num_ticks_poc_diff_one_minus1;
-    StdVideoH265HrdParameters* pHrdParameters;
+    StdVideoH265HrdParameters* hrd_parameters;
     uint16_t min_spatial_segmentation_idc;
     uint8_t max_bytes_per_pic_denom;
     uint8_t max_bits_per_min_cu_denom;
@@ -205,9 +174,10 @@ typedef struct StdVideoH265SequenceParameterSetVui {

 typedef struct StdVideoH265PredictorPaletteEntries
 {
-    uint16_t PredictorPaletteEntries[STD_VIDEO_H265_PREDICTOR_PALETTE_COMPONENTS_LIST_SIZE][STD_VIDEO_H265_PREDICTOR_PALETTE_COMP_ENTRIES_LIST_SIZE];
+    uint16_t PredictorPaletteEntries[3][128];
 } StdVideoH265PredictorPaletteEntries;

+
 typedef struct StdVideoH265SpsFlags {
     uint32_t sps_temporal_id_nesting_flag : 1;
     uint32_t separate_colour_plane_flag : 1;
@@ -224,7 +194,7 @@ typedef struct StdVideoH265SpsFlags {
     uint32_t sps_extension_present_flag : 1;
     uint32_t sps_range_extension_flag : 1;

-    // extension SPS flags, valid when STD_VIDEO_H265_PROFILE_IDC_FORMAT_RANGE_EXTENSIONS is set
+    // extension SPS flags, valid when std_video_h265_profile_idc_format_range_extensions is set
     uint32_t transform_skip_rotation_enabled_flag : 1;
     uint32_t transform_skip_context_enabled_flag : 1;
     uint32_t implicit_rdpcm_enabled_flag : 1;
@@ -235,7 +205,7 @@ typedef struct StdVideoH265SpsFlags {
     uint32_t persistent_rice_adaptation_enabled_flag : 1;
     uint32_t cabac_bypass_alignment_enabled_flag : 1;

-    // extension SPS flags, valid when STD_VIDEO_H265_PROFILE_IDC_SCC_EXTENSIONS is set
+    // extension SPS flags, valid when std_video_h265_profile_idc_scc_extensions is set
     uint32_t sps_curr_pic_ref_enabled_flag : 1;
     uint32_t palette_mode_enabled_flag : 1;
     uint32_t sps_palette_predictor_initializer_present_flag : 1;
@@ -277,7 +247,7 @@ typedef struct StdVideoH265SequenceParameterSet
     StdVideoH265ScalingLists* pScalingLists; // Must be a valid pointer if sps_scaling_list_data_present_flag is set
     StdVideoH265SequenceParameterSetVui* pSequenceParameterSetVui; // Must be a valid pointer if StdVideoH265SpsFlags:vui_parameters_present_flag is set palette_max_size;

-    // extension SPS flags, valid when STD_VIDEO_H265_PROFILE_IDC_SCC_EXTENSIONS is set
+    // extension SPS flags, valid when std_video_h265_profile_idc_scc_extensions is set
     uint8_t palette_max_size;
     uint8_t delta_palette_max_predictor_size;
     uint8_t motion_vector_resolution_control_idc;
@@ -311,11 +281,11 @@ typedef struct StdVideoH265PpsFlags {
     uint32_t slice_segment_header_extension_present_flag : 1;
     uint32_t pps_extension_present_flag : 1;

-    // extension PPS flags, valid when STD_VIDEO_H265_PROFILE_IDC_FORMAT_RANGE_EXTENSIONS is set
+    // extension PPS flags, valid when std_video_h265_profile_idc_format_range_extensions is set
     uint32_t cross_component_prediction_enabled_flag : 1;
     uint32_t chroma_qp_offset_list_enabled_flag : 1;

-    // extension PPS flags, valid when STD_VIDEO_H265_PROFILE_IDC_SCC_EXTENSIONS is set
+    // extension PPS flags, valid when std_video_h265_profile_idc_scc_extensions is set
     uint32_t pps_curr_pic_ref_enabled_flag : 1;
     uint32_t residual_adaptive_colour_transform_enabled_flag : 1;
     uint32_t pps_slice_act_qp_offsets_present_flag : 1;
@@ -337,24 +307,24 @@ typedef struct StdVideoH265PictureParameterSet
     int8_t pps_cr_qp_offset;
     uint8_t num_tile_columns_minus1;
     uint8_t num_tile_rows_minus1;
-    uint16_t column_width_minus1[STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_COLS_LIST_SIZE];
+    uint16_t column_width_minus1[19];
-    uint16_t row_height_minus1[STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_ROWS_LIST_SIZE];
+    uint16_t row_height_minus1[21];
     int8_t pps_beta_offset_div2;
     int8_t pps_tc_offset_div2;
     uint8_t log2_parallel_merge_level_minus2;
     StdVideoH265PpsFlags flags;
     StdVideoH265ScalingLists* pScalingLists; // Must be a valid pointer if pps_scaling_list_data_present_flag is set

-    // extension PPS, valid when STD_VIDEO_H265_PROFILE_IDC_FORMAT_RANGE_EXTENSIONS is set
+    // extension PPS, valid when std_video_h265_profile_idc_format_range_extensions is set
     uint8_t log2_max_transform_skip_block_size_minus2;
     uint8_t diff_cu_chroma_qp_offset_depth;
     uint8_t chroma_qp_offset_list_len_minus1;
-    int8_t cb_qp_offset_list[STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE];
+    int8_t cb_qp_offset_list[6];
-    int8_t cr_qp_offset_list[STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE];
+    int8_t cr_qp_offset_list[6];
     uint8_t log2_sao_offset_scale_luma;
     uint8_t log2_sao_offset_scale_chroma;

-    // extension PPS, valid when STD_VIDEO_H265_PROFILE_IDC_SCC_EXTENSIONS is set
+    // extension PPS, valid when std_video_h265_profile_idc_scc_extensions is set
     int8_t pps_act_y_qp_offset_plus5;
     int8_t pps_act_cb_qp_offset_plus5;
     int8_t pps_act_cr_qp_offset_plus5;
@@ -17,8 +17,6 @@ extern "C" {
 // Video h265 Decode related parameters:
 // *************************************************

-#define STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE 8
-
 typedef struct StdVideoDecodeH265PictureInfoFlags {
     uint32_t IrapPicFlag : 1;
     uint32_t IdrPicFlag : 1;
@@ -35,14 +33,11 @@ typedef struct StdVideoDecodeH265PictureInfo {
     uint16_t NumBitsForSTRefPicSetInSlice; // number of bits used in st_ref_pic_set()
                                            //when short_term_ref_pic_set_sps_flag is 0; otherwise set to 0.
     uint8_t NumDeltaPocsOfRefRpsIdx; // NumDeltaPocs[ RefRpsIdx ] when short_term_ref_pic_set_sps_flag = 1, otherwise 0
-    uint8_t RefPicSetStCurrBefore[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE]; // slotIndex as used in
+    uint8_t RefPicSetStCurrBefore[8]; // slotIndex as used in VkVideoReferenceSlotKHR structures representing
-                                      // VkVideoReferenceSlotKHR structures representing
                                       //pReferenceSlots in VkVideoDecodeInfoKHR, 0xff for invalid slotIndex
-    uint8_t RefPicSetStCurrAfter[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE]; // slotIndex as used in
+    uint8_t RefPicSetStCurrAfter[8]; // slotIndex as used in VkVideoReferenceSlotKHR structures representing
-                                     // VkVideoReferenceSlotKHR structures representing
                                      //pReferenceSlots in VkVideoDecodeInfoKHR, 0xff for invalid slotIndex
-    uint8_t RefPicSetLtCurr[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE]; // slotIndex as used in
+    uint8_t RefPicSetLtCurr[8]; // slotIndex as used in VkVideoReferenceSlotKHR structures representing
-                                // VkVideoReferenceSlotKHR structures representing
                                 //pReferenceSlots in VkVideoDecodeInfoKHR, 0xff for invalid slotIndex
     StdVideoDecodeH265PictureInfoFlags flags;
 } StdVideoDecodeH265PictureInfo;
@@ -33,7 +33,7 @@
// Version 2 - Add Loader/ICD Interface version negotiation
//             via vk_icdNegotiateLoaderICDInterfaceVersion.
// Version 3 - Add ICD creation/destruction of KHR_surface objects.
-// Version 4 - Add unknown physical device extension querying via
+// Version 4 - Add unknown physical device extension qyering via
//             vk_icdGetPhysicalDeviceProcAddr.
// Version 5 - Tells ICDs that the loader is now paying attention to the
//             application version of Vulkan passed into the ApplicationInfo
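The version history above describes how the loader and an ICD agree on an interface version through vk_icdNegotiateLoaderICDInterfaceVersion. A sketch of the ICD side of that negotiation, assuming the usual convention that the loader passes in the highest version it supports and the ICD lowers it to what it implements; the supported-version constant is illustrative, and the exact rules come from the loader documentation, not from this header alone:

```c
#include <vulkan/vk_icd.h>

#define MY_ICD_SUPPORTED_LOADER_INTERFACE_VERSION 5u  // illustrative value

// Sketch of an ICD-side negotiation entry point: the loader writes the highest
// interface version it supports into *pVersion, and the ICD lowers it if needed.
VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pVersion)
{
    if (*pVersion > MY_ICD_SUPPORTED_LOADER_INTERFACE_VERSION) {
        *pVersion = MY_ICD_SUPPORTED_LOADER_INTERFACE_VERSION;
    }
    // Interface versions below 2 predate this negotiation scheme.
    return (*pVersion >= 2u) ? VK_SUCCESS : VK_ERROR_INCOMPATIBLE_DRIVER;
}
```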
@@ -85,6 +85,7 @@
#include "vulkan_screen.h"
#endif

+
#ifdef VK_ENABLE_BETA_EXTENSIONS
#include "vulkan_beta.h"
#endif
externals/Vulkan-Headers/include/vulkan/vulkan.hpp (vendored, 126475 changed lines): file diff suppressed because it is too large.
@@ -44,7 +44,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(

#define VK_ANDROID_external_memory_android_hardware_buffer 1
struct AHardwareBuffer;
-#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 4
+#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3
#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer"
typedef struct VkAndroidHardwareBufferUsageANDROID {
    VkStructureType    sType;
@@ -90,19 +90,6 @@ typedef struct VkExternalFormatANDROID {
    uint64_t    externalFormat;
} VkExternalFormatANDROID;

-typedef struct VkAndroidHardwareBufferFormatProperties2ANDROID {
-    VkStructureType                  sType;
-    void*                            pNext;
-    VkFormat                         format;
-    uint64_t                         externalFormat;
-    VkFormatFeatureFlags2KHR         formatFeatures;
-    VkComponentMapping               samplerYcbcrConversionComponents;
-    VkSamplerYcbcrModelConversion    suggestedYcbcrModel;
-    VkSamplerYcbcrRange              suggestedYcbcrRange;
-    VkChromaLocation                 suggestedXChromaOffset;
-    VkChromaLocation                 suggestedYChromaOffset;
-} VkAndroidHardwareBufferFormatProperties2ANDROID;
-
typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties);
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer);
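The Android section keeps the vkGetAndroidHardwareBufferPropertiesANDROID entry point while dropping the newer VkAndroidHardwareBufferFormatProperties2ANDROID struct. A sketch of the basic property query that works against either side of this change; the allocationSize/memoryTypeBits members and the VK_STRUCTURE_TYPE_* constant are taken from the published extension rather than from this hunk, so treat them as assumptions:

```c
#define VK_USE_PLATFORM_ANDROID_KHR
#include <vulkan/vulkan.h>

// Sketch: query the memory requirements of an externally allocated
// AHardwareBuffer before importing it into Vulkan memory.
VkResult query_ahb_properties(VkDevice device, const struct AHardwareBuffer* ahb,
                              VkDeviceSize* out_size, uint32_t* out_memory_type_bits)
{
    VkAndroidHardwareBufferPropertiesANDROID props = {
        .sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID,
        .pNext = NULL,
    };
    VkResult res = vkGetAndroidHardwareBufferPropertiesANDROID(device, ahb, &props);
    if (res == VK_SUCCESS) {
        *out_size = props.allocationSize;             // size the import must cover
        *out_memory_type_bits = props.memoryTypeBits; // acceptable memory types
    }
    return res;
}
```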
@@ -22,7 +22,7 @@ extern "C" {
#define VK_KHR_video_queue 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkVideoSessionKHR)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkVideoSessionParametersKHR)
-#define VK_KHR_VIDEO_QUEUE_SPEC_VERSION   2
+#define VK_KHR_VIDEO_QUEUE_SPEC_VERSION   1
#define VK_KHR_VIDEO_QUEUE_EXTENSION_NAME "VK_KHR_video_queue"

typedef enum VkQueryResultStatusKHR {
@@ -37,9 +37,6 @@ typedef enum VkVideoCodecOperationFlagBitsKHR {
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_EXT = 0x00010000,
#endif
-#ifdef VK_ENABLE_BETA_EXTENSIONS
-    VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_EXT = 0x00020000,
-#endif
#ifdef VK_ENABLE_BETA_EXTENSIONS
    VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_EXT = 0x00000001,
#endif
@@ -69,12 +66,12 @@ typedef enum VkVideoComponentBitDepthFlagBitsKHR {
} VkVideoComponentBitDepthFlagBitsKHR;
typedef VkFlags VkVideoComponentBitDepthFlagsKHR;

-typedef enum VkVideoCapabilityFlagBitsKHR {
-    VK_VIDEO_CAPABILITY_PROTECTED_CONTENT_BIT_KHR = 0x00000001,
-    VK_VIDEO_CAPABILITY_SEPARATE_REFERENCE_IMAGES_BIT_KHR = 0x00000002,
-    VK_VIDEO_CAPABILITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
-} VkVideoCapabilityFlagBitsKHR;
-typedef VkFlags VkVideoCapabilityFlagsKHR;
+typedef enum VkVideoCapabilitiesFlagBitsKHR {
+    VK_VIDEO_CAPABILITIES_PROTECTED_CONTENT_BIT_KHR = 0x00000001,
+    VK_VIDEO_CAPABILITIES_SEPARATE_REFERENCE_IMAGES_BIT_KHR = 0x00000002,
+    VK_VIDEO_CAPABILITIES_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkVideoCapabilitiesFlagBitsKHR;
+typedef VkFlags VkVideoCapabilitiesFlagsKHR;

typedef enum VkVideoSessionCreateFlagBitsKHR {
    VK_VIDEO_SESSION_CREATE_DEFAULT_KHR = 0,
@@ -93,6 +90,7 @@ typedef enum VkVideoCodingControlFlagBitsKHR {
typedef VkFlags VkVideoCodingControlFlagsKHR;

typedef enum VkVideoCodingQualityPresetFlagBitsKHR {
+    VK_VIDEO_CODING_QUALITY_PRESET_DEFAULT_BIT_KHR = 0,
    VK_VIDEO_CODING_QUALITY_PRESET_NORMAL_BIT_KHR = 0x00000001,
    VK_VIDEO_CODING_QUALITY_PRESET_POWER_BIT_KHR = 0x00000002,
    VK_VIDEO_CODING_QUALITY_PRESET_QUALITY_BIT_KHR = 0x00000004,
@@ -124,7 +122,7 @@ typedef struct VkVideoProfilesKHR {
typedef struct VkVideoCapabilitiesKHR {
    VkStructureType              sType;
    void*                        pNext;
-    VkVideoCapabilityFlagsKHR    capabilityFlags;
+    VkVideoCapabilitiesFlagsKHR  capabilityFlags;
    VkDeviceSize                 minBitstreamBufferOffsetAlignment;
    VkDeviceSize                 minBitstreamBufferSizeAlignment;
    VkExtent2D                   videoPictureExtentGranularity;
@@ -136,7 +134,7 @@ typedef struct VkVideoCapabilitiesKHR {

typedef struct VkPhysicalDeviceVideoFormatInfoKHR {
    VkStructureType              sType;
-    void*                        pNext;
+    const void*                  pNext;
    VkImageUsageFlags            imageUsage;
    const VkVideoProfilesKHR*    pVideoProfiles;
} VkPhysicalDeviceVideoFormatInfoKHR;
@@ -307,7 +305,7 @@ VKAPI_ATTR void VKAPI_CALL vkCmdControlVideoCodingKHR(


#define VK_KHR_video_decode_queue 1
-#define VK_KHR_VIDEO_DECODE_QUEUE_SPEC_VERSION   2
+#define VK_KHR_VIDEO_DECODE_QUEUE_SPEC_VERSION   1
#define VK_KHR_VIDEO_DECODE_QUEUE_EXTENSION_NAME "VK_KHR_video_decode_queue"

typedef enum VkVideoDecodeFlagBitsKHR {
@@ -372,7 +370,7 @@ typedef struct VkPhysicalDevicePortabilitySubsetPropertiesKHR {


#define VK_KHR_video_encode_queue 1
-#define VK_KHR_VIDEO_ENCODE_QUEUE_SPEC_VERSION   3
+#define VK_KHR_VIDEO_ENCODE_QUEUE_SPEC_VERSION   2
#define VK_KHR_VIDEO_ENCODE_QUEUE_EXTENSION_NAME "VK_KHR_video_encode_queue"

typedef enum VkVideoEncodeFlagBitsKHR {
@@ -435,10 +433,10 @@ VKAPI_ATTR void VKAPI_CALL vkCmdEncodeVideoKHR(
#define VK_EXT_video_encode_h264 1
#include "vk_video/vulkan_video_codec_h264std.h"
#include "vk_video/vulkan_video_codec_h264std_encode.h"
-#define VK_EXT_VIDEO_ENCODE_H264_SPEC_VERSION 2
+#define VK_EXT_VIDEO_ENCODE_H264_SPEC_VERSION 1
#define VK_EXT_VIDEO_ENCODE_H264_EXTENSION_NAME "VK_EXT_video_encode_h264"

-typedef enum VkVideoEncodeH264CapabilityFlagBitsEXT {
+typedef enum VkVideoEncodeH264CapabilitiesFlagBitsEXT {
    VK_VIDEO_ENCODE_H264_CAPABILITY_CABAC_BIT_EXT = 0x00000001,
    VK_VIDEO_ENCODE_H264_CAPABILITY_CAVLC_BIT_EXT = 0x00000002,
    VK_VIDEO_ENCODE_H264_CAPABILITY_WEIGHTED_BI_PRED_IMPLICIT_BIT_EXT = 0x00000004,
@@ -450,9 +448,9 @@ typedef enum VkVideoEncodeH264CapabilityFlagBitsEXT {
    VK_VIDEO_ENCODE_H264_CAPABILITY_DEBLOCKING_FILTER_PARTIAL_BIT_EXT = 0x00000100,
    VK_VIDEO_ENCODE_H264_CAPABILITY_MULTIPLE_SLICE_PER_FRAME_BIT_EXT = 0x00000200,
    VK_VIDEO_ENCODE_H264_CAPABILITY_EVENLY_DISTRIBUTED_SLICE_SIZE_BIT_EXT = 0x00000400,
-    VK_VIDEO_ENCODE_H264_CAPABILITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkVideoEncodeH264CapabilityFlagBitsEXT;
-typedef VkFlags VkVideoEncodeH264CapabilityFlagsEXT;
+    VK_VIDEO_ENCODE_H264_CAPABILITIES_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkVideoEncodeH264CapabilitiesFlagBitsEXT;
+typedef VkFlags VkVideoEncodeH264CapabilitiesFlagsEXT;

typedef enum VkVideoEncodeH264InputModeFlagBitsEXT {
    VK_VIDEO_ENCODE_H264_INPUT_MODE_FRAME_BIT_EXT = 0x00000001,
@@ -479,7 +477,7 @@ typedef VkFlags VkVideoEncodeH264CreateFlagsEXT;
typedef struct VkVideoEncodeH264CapabilitiesEXT {
    VkStructureType                        sType;
    const void*                            pNext;
-    VkVideoEncodeH264CapabilityFlagsEXT    flags;
+    VkVideoEncodeH264CapabilitiesFlagsEXT  flags;
    VkVideoEncodeH264InputModeFlagsEXT     inputModeFlags;
    VkVideoEncodeH264OutputModeFlagsEXT    outputModeFlags;
    VkExtent2D                             minPictureSizeInMbs;
@@ -567,152 +565,24 @@ typedef struct VkVideoEncodeH264ProfileEXT {


-#define VK_EXT_video_encode_h265 1
-#include "vk_video/vulkan_video_codec_h265std.h"
-#include "vk_video/vulkan_video_codec_h265std_encode.h"
-#define VK_EXT_VIDEO_ENCODE_H265_SPEC_VERSION 2
-#define VK_EXT_VIDEO_ENCODE_H265_EXTENSION_NAME "VK_EXT_video_encode_h265"
-typedef VkFlags VkVideoEncodeH265CapabilityFlagsEXT;
-
-typedef enum VkVideoEncodeH265InputModeFlagBitsEXT {
-    VK_VIDEO_ENCODE_H265_INPUT_MODE_FRAME_BIT_EXT = 0x00000001,
-    VK_VIDEO_ENCODE_H265_INPUT_MODE_SLICE_BIT_EXT = 0x00000002,
-    VK_VIDEO_ENCODE_H265_INPUT_MODE_NON_VCL_BIT_EXT = 0x00000004,
-    VK_VIDEO_ENCODE_H265_INPUT_MODE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkVideoEncodeH265InputModeFlagBitsEXT;
-typedef VkFlags VkVideoEncodeH265InputModeFlagsEXT;
-
-typedef enum VkVideoEncodeH265OutputModeFlagBitsEXT {
-    VK_VIDEO_ENCODE_H265_OUTPUT_MODE_FRAME_BIT_EXT = 0x00000001,
-    VK_VIDEO_ENCODE_H265_OUTPUT_MODE_SLICE_BIT_EXT = 0x00000002,
-    VK_VIDEO_ENCODE_H265_OUTPUT_MODE_NON_VCL_BIT_EXT = 0x00000004,
-    VK_VIDEO_ENCODE_H265_OUTPUT_MODE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkVideoEncodeH265OutputModeFlagBitsEXT;
-typedef VkFlags VkVideoEncodeH265OutputModeFlagsEXT;
-typedef VkFlags VkVideoEncodeH265CreateFlagsEXT;
-
-typedef enum VkVideoEncodeH265CtbSizeFlagBitsEXT {
-    VK_VIDEO_ENCODE_H265_CTB_SIZE_8_BIT_EXT = 0x00000001,
-    VK_VIDEO_ENCODE_H265_CTB_SIZE_16_BIT_EXT = 0x00000002,
-    VK_VIDEO_ENCODE_H265_CTB_SIZE_32_BIT_EXT = 0x00000004,
-    VK_VIDEO_ENCODE_H265_CTB_SIZE_64_BIT_EXT = 0x00000008,
-    VK_VIDEO_ENCODE_H265_CTB_SIZE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkVideoEncodeH265CtbSizeFlagBitsEXT;
-typedef VkFlags VkVideoEncodeH265CtbSizeFlagsEXT;
-
-typedef struct VkVideoEncodeH265CapabilitiesEXT {
-    VkStructureType                        sType;
-    const void*                            pNext;
-    VkVideoEncodeH265CapabilityFlagsEXT    flags;
-    VkVideoEncodeH265InputModeFlagsEXT     inputModeFlags;
-    VkVideoEncodeH265OutputModeFlagsEXT    outputModeFlags;
-    VkVideoEncodeH265CtbSizeFlagsEXT       ctbSizes;
-    VkExtent2D                             inputImageDataAlignment;
-    uint8_t                                maxNumL0ReferenceForP;
-    uint8_t                                maxNumL0ReferenceForB;
-    uint8_t                                maxNumL1Reference;
-    uint8_t                                maxNumSubLayers;
-    uint8_t                                qualityLevelCount;
-    VkExtensionProperties                  stdExtensionVersion;
-} VkVideoEncodeH265CapabilitiesEXT;
-
-typedef struct VkVideoEncodeH265SessionCreateInfoEXT {
-    VkStructureType                    sType;
-    const void*                        pNext;
-    VkVideoEncodeH265CreateFlagsEXT    flags;
-    const VkExtensionProperties*       pStdExtensionVersion;
-} VkVideoEncodeH265SessionCreateInfoEXT;
-
-typedef struct VkVideoEncodeH265SessionParametersAddInfoEXT {
-    VkStructureType                            sType;
-    const void*                                pNext;
-    uint32_t                                   vpsStdCount;
-    const StdVideoH265VideoParameterSet*       pVpsStd;
-    uint32_t                                   spsStdCount;
-    const StdVideoH265SequenceParameterSet*    pSpsStd;
-    uint32_t                                   ppsStdCount;
-    const StdVideoH265PictureParameterSet*     pPpsStd;
-} VkVideoEncodeH265SessionParametersAddInfoEXT;
-
-typedef struct VkVideoEncodeH265SessionParametersCreateInfoEXT {
-    VkStructureType                                        sType;
-    const void*                                            pNext;
-    uint32_t                                               maxVpsStdCount;
-    uint32_t                                               maxSpsStdCount;
-    uint32_t                                               maxPpsStdCount;
-    const VkVideoEncodeH265SessionParametersAddInfoEXT*    pParametersAddInfo;
-} VkVideoEncodeH265SessionParametersCreateInfoEXT;
-
-typedef struct VkVideoEncodeH265DpbSlotInfoEXT {
-    VkStructureType                           sType;
-    const void*                               pNext;
-    int8_t                                    slotIndex;
-    const StdVideoEncodeH265ReferenceInfo*    pStdReferenceInfo;
-} VkVideoEncodeH265DpbSlotInfoEXT;
-
-typedef struct VkVideoEncodeH265ReferenceListsEXT {
-    VkStructureType                                    sType;
-    const void*                                        pNext;
-    uint8_t                                            referenceList0EntryCount;
-    const VkVideoEncodeH265DpbSlotInfoEXT*             pReferenceList0Entries;
-    uint8_t                                            referenceList1EntryCount;
-    const VkVideoEncodeH265DpbSlotInfoEXT*             pReferenceList1Entries;
-    const StdVideoEncodeH265ReferenceModifications*    pReferenceModifications;
-} VkVideoEncodeH265ReferenceListsEXT;
-
-typedef struct VkVideoEncodeH265NaluSliceEXT {
-    VkStructureType                              sType;
-    const void*                                  pNext;
-    uint32_t                                     ctbCount;
-    const VkVideoEncodeH265ReferenceListsEXT*    pReferenceFinalLists;
-    const StdVideoEncodeH265SliceHeader*         pSliceHeaderStd;
-} VkVideoEncodeH265NaluSliceEXT;
-
-typedef struct VkVideoEncodeH265VclFrameInfoEXT {
-    VkStructureType                              sType;
-    const void*                                  pNext;
-    const VkVideoEncodeH265ReferenceListsEXT*    pReferenceFinalLists;
-    uint32_t                                     naluSliceEntryCount;
-    const VkVideoEncodeH265NaluSliceEXT*         pNaluSliceEntries;
-    const StdVideoEncodeH265PictureInfo*         pCurrentPictureInfo;
-} VkVideoEncodeH265VclFrameInfoEXT;
-
-typedef struct VkVideoEncodeH265EmitPictureParametersEXT {
-    VkStructureType    sType;
-    const void*        pNext;
-    uint8_t            vpsId;
-    uint8_t            spsId;
-    VkBool32           emitVpsEnable;
-    VkBool32           emitSpsEnable;
-    uint32_t           ppsIdEntryCount;
-    const uint8_t*     ppsIdEntries;
-} VkVideoEncodeH265EmitPictureParametersEXT;
-
-typedef struct VkVideoEncodeH265ProfileEXT {
-    VkStructureType           sType;
-    const void*               pNext;
-    StdVideoH265ProfileIdc    stdProfileIdc;
-} VkVideoEncodeH265ProfileEXT;
-
-
#define VK_EXT_video_decode_h264 1
#include "vk_video/vulkan_video_codec_h264std_decode.h"
-#define VK_EXT_VIDEO_DECODE_H264_SPEC_VERSION 3
+#define VK_EXT_VIDEO_DECODE_H264_SPEC_VERSION 1
#define VK_EXT_VIDEO_DECODE_H264_EXTENSION_NAME "VK_EXT_video_decode_h264"

-typedef enum VkVideoDecodeH264PictureLayoutFlagBitsEXT {
-    VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_PROGRESSIVE_EXT = 0,
-    VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_INTERLEAVED_LINES_BIT_EXT = 0x00000001,
-    VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_SEPARATE_PLANES_BIT_EXT = 0x00000002,
-    VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
-} VkVideoDecodeH264PictureLayoutFlagBitsEXT;
-typedef VkFlags VkVideoDecodeH264PictureLayoutFlagsEXT;
+typedef enum VkVideoDecodeH264FieldLayoutFlagBitsEXT {
+    VK_VIDEO_DECODE_H264_PROGRESSIVE_PICTURES_ONLY_EXT = 0,
+    VK_VIDEO_DECODE_H264_FIELD_LAYOUT_LINE_INTERLACED_PLANE_BIT_EXT = 0x00000001,
+    VK_VIDEO_DECODE_H264_FIELD_LAYOUT_SEPARATE_INTERLACED_PLANE_BIT_EXT = 0x00000002,
+    VK_VIDEO_DECODE_H264_FIELD_LAYOUT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkVideoDecodeH264FieldLayoutFlagBitsEXT;
+typedef VkFlags VkVideoDecodeH264FieldLayoutFlagsEXT;
typedef VkFlags VkVideoDecodeH264CreateFlagsEXT;
typedef struct VkVideoDecodeH264ProfileEXT {
    VkStructureType                           sType;
    const void*                               pNext;
    StdVideoH264ProfileIdc                    stdProfileIdc;
-    VkVideoDecodeH264PictureLayoutFlagsEXT    pictureLayout;
+    VkVideoDecodeH264FieldLayoutFlagsEXT      fieldLayout;
} VkVideoDecodeH264ProfileEXT;

typedef struct VkVideoDecodeH264CapabilitiesEXT {
@@ -770,6 +640,7 @@ typedef struct VkVideoDecodeH264DpbSlotInfoEXT {


#define VK_EXT_video_decode_h265 1
+#include "vk_video/vulkan_video_codec_h265std.h"
#include "vk_video/vulkan_video_codec_h265std_decode.h"
#define VK_EXT_VIDEO_DECODE_H265_SPEC_VERSION 1
#define VK_EXT_VIDEO_DECODE_H265_EXTENSION_NAME "VK_EXT_video_decode_h265"
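Among the renames above, the H.264 decode profile's pictureLayout member becomes fieldLayout and its flag bits move to the FIELD_LAYOUT spelling. A sketch of filling the profile struct against the post-commit naming; the sType and profile IDC constants are assumptions taken from the companion provisional headers, not from this hunk:

```c
#define VK_ENABLE_BETA_EXTENSIONS
#include <vulkan/vulkan.h>

// Sketch: describe an H.264 decode profile with the post-commit naming, where
// the interlacing description is a "field layout" rather than a "picture layout".
static VkVideoDecodeH264ProfileEXT make_h264_decode_profile(void)
{
    VkVideoDecodeH264ProfileEXT profile = {
        .sType         = VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_EXT, // assumed constant
        .pNext         = NULL,
        .stdProfileIdc = STD_VIDEO_H264_PROFILE_IDC_MAIN,                 // assumed constant
        .fieldLayout   = VK_VIDEO_DECODE_H264_PROGRESSIVE_PICTURES_ONLY_EXT,
    };
    return profile;
}
```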
(File diff suppressed because it is too large.)
@@ -114,143 +114,6 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreZirconHandleFUCHSIA(
    zx_handle_t*                                pZirconHandle);
#endif


-#define VK_FUCHSIA_buffer_collection 1
-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferCollectionFUCHSIA)
-#define VK_FUCHSIA_BUFFER_COLLECTION_SPEC_VERSION 2
-#define VK_FUCHSIA_BUFFER_COLLECTION_EXTENSION_NAME "VK_FUCHSIA_buffer_collection"
-typedef VkFlags VkImageFormatConstraintsFlagsFUCHSIA;
-
-typedef enum VkImageConstraintsInfoFlagBitsFUCHSIA {
-    VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA = 0x00000001,
-    VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA = 0x00000002,
-    VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA = 0x00000004,
-    VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA = 0x00000008,
-    VK_IMAGE_CONSTRAINTS_INFO_PROTECTED_OPTIONAL_FUCHSIA = 0x00000010,
-    VK_IMAGE_CONSTRAINTS_INFO_FLAG_BITS_MAX_ENUM_FUCHSIA = 0x7FFFFFFF
-} VkImageConstraintsInfoFlagBitsFUCHSIA;
-typedef VkFlags VkImageConstraintsInfoFlagsFUCHSIA;
-typedef struct VkBufferCollectionCreateInfoFUCHSIA {
-    VkStructureType    sType;
-    const void*        pNext;
-    zx_handle_t        collectionToken;
-} VkBufferCollectionCreateInfoFUCHSIA;
-
-typedef struct VkImportMemoryBufferCollectionFUCHSIA {
-    VkStructureType              sType;
-    const void*                  pNext;
-    VkBufferCollectionFUCHSIA    collection;
-    uint32_t                     index;
-} VkImportMemoryBufferCollectionFUCHSIA;
-
-typedef struct VkBufferCollectionImageCreateInfoFUCHSIA {
-    VkStructureType              sType;
-    const void*                  pNext;
-    VkBufferCollectionFUCHSIA    collection;
-    uint32_t                     index;
-} VkBufferCollectionImageCreateInfoFUCHSIA;
-
-typedef struct VkBufferCollectionConstraintsInfoFUCHSIA {
-    VkStructureType    sType;
-    const void*        pNext;
-    uint32_t           minBufferCount;
-    uint32_t           maxBufferCount;
-    uint32_t           minBufferCountForCamping;
-    uint32_t           minBufferCountForDedicatedSlack;
-    uint32_t           minBufferCountForSharedSlack;
-} VkBufferCollectionConstraintsInfoFUCHSIA;
-
-typedef struct VkBufferConstraintsInfoFUCHSIA {
-    VkStructureType                             sType;
-    const void*                                 pNext;
-    VkBufferCreateInfo                          createInfo;
-    VkFormatFeatureFlags                        requiredFormatFeatures;
-    VkBufferCollectionConstraintsInfoFUCHSIA    bufferCollectionConstraints;
-} VkBufferConstraintsInfoFUCHSIA;
-
-typedef struct VkBufferCollectionBufferCreateInfoFUCHSIA {
-    VkStructureType              sType;
-    const void*                  pNext;
-    VkBufferCollectionFUCHSIA    collection;
-    uint32_t                     index;
-} VkBufferCollectionBufferCreateInfoFUCHSIA;
-
-typedef struct VkSysmemColorSpaceFUCHSIA {
-    VkStructureType    sType;
-    const void*        pNext;
-    uint32_t           colorSpace;
-} VkSysmemColorSpaceFUCHSIA;
-
-typedef struct VkBufferCollectionPropertiesFUCHSIA {
-    VkStructureType                  sType;
-    void*                            pNext;
-    uint32_t                         memoryTypeBits;
-    uint32_t                         bufferCount;
-    uint32_t                         createInfoIndex;
-    uint64_t                         sysmemPixelFormat;
-    VkFormatFeatureFlags             formatFeatures;
-    VkSysmemColorSpaceFUCHSIA        sysmemColorSpaceIndex;
-    VkComponentMapping               samplerYcbcrConversionComponents;
-    VkSamplerYcbcrModelConversion    suggestedYcbcrModel;
-    VkSamplerYcbcrRange              suggestedYcbcrRange;
-    VkChromaLocation                 suggestedXChromaOffset;
-    VkChromaLocation                 suggestedYChromaOffset;
-} VkBufferCollectionPropertiesFUCHSIA;
-
-typedef struct VkImageFormatConstraintsInfoFUCHSIA {
-    VkStructureType                         sType;
-    const void*                             pNext;
-    VkImageCreateInfo                       imageCreateInfo;
-    VkFormatFeatureFlags                    requiredFormatFeatures;
-    VkImageFormatConstraintsFlagsFUCHSIA    flags;
-    uint64_t                                sysmemPixelFormat;
-    uint32_t                                colorSpaceCount;
-    const VkSysmemColorSpaceFUCHSIA*        pColorSpaces;
-} VkImageFormatConstraintsInfoFUCHSIA;
-
-typedef struct VkImageConstraintsInfoFUCHSIA {
-    VkStructureType                               sType;
-    const void*                                   pNext;
-    uint32_t                                      formatConstraintsCount;
-    const VkImageFormatConstraintsInfoFUCHSIA*    pFormatConstraints;
-    VkBufferCollectionConstraintsInfoFUCHSIA      bufferCollectionConstraints;
-    VkImageConstraintsInfoFlagsFUCHSIA            flags;
-} VkImageConstraintsInfoFUCHSIA;
-
-typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferCollectionFUCHSIA)(VkDevice device, const VkBufferCollectionCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferCollectionFUCHSIA* pCollection);
-typedef VkResult (VKAPI_PTR *PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo);
-typedef VkResult (VKAPI_PTR *PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo);
-typedef void (VKAPI_PTR *PFN_vkDestroyBufferCollectionFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, const VkAllocationCallbacks* pAllocator);
-typedef VkResult (VKAPI_PTR *PFN_vkGetBufferCollectionPropertiesFUCHSIA)(VkDevice device, VkBufferCollectionFUCHSIA collection, VkBufferCollectionPropertiesFUCHSIA* pProperties);
-
-#ifndef VK_NO_PROTOTYPES
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferCollectionFUCHSIA(
-    VkDevice                                    device,
-    const VkBufferCollectionCreateInfoFUCHSIA*  pCreateInfo,
-    const VkAllocationCallbacks*                pAllocator,
-    VkBufferCollectionFUCHSIA*                  pCollection);
-
-VKAPI_ATTR VkResult VKAPI_CALL vkSetBufferCollectionImageConstraintsFUCHSIA(
-    VkDevice                                    device,
-    VkBufferCollectionFUCHSIA                   collection,
-    const VkImageConstraintsInfoFUCHSIA*        pImageConstraintsInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL vkSetBufferCollectionBufferConstraintsFUCHSIA(
-    VkDevice                                    device,
-    VkBufferCollectionFUCHSIA                   collection,
-    const VkBufferConstraintsInfoFUCHSIA*       pBufferConstraintsInfo);
-
-VKAPI_ATTR void VKAPI_CALL vkDestroyBufferCollectionFUCHSIA(
-    VkDevice                                    device,
-    VkBufferCollectionFUCHSIA                   collection,
-    const VkAllocationCallbacks*                pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL vkGetBufferCollectionPropertiesFUCHSIA(
-    VkDevice                                    device,
-    VkBufferCollectionFUCHSIA                   collection,
-    VkBufferCollectionPropertiesFUCHSIA*        pProperties);
-#endif
-
#ifdef __cplusplus
}
#endif
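The block removed here is the whole VK_FUCHSIA_buffer_collection interface. A sketch of the flow those entry points implement (create a collection from a sysmem token, set buffer constraints, read back the negotiated properties), which only compiles against the pre-commit header; the VK_STRUCTURE_TYPE_* values are assumptions, while the entry points and struct members are the ones listed in the removed block above:

```c
#define VK_USE_PLATFORM_FUCHSIA
#include <vulkan/vulkan.h>
#include <zircon/types.h>

// Sketch: negotiate a sysmem buffer collection and read back its properties.
// For brevity the collection is destroyed immediately; a real user keeps it
// alive while importing the collection's memory.
VkResult negotiate_buffer_collection(VkDevice device, zx_handle_t token,
                                     const VkBufferConstraintsInfoFUCHSIA* constraints,
                                     VkBufferCollectionPropertiesFUCHSIA* out_props)
{
    VkBufferCollectionCreateInfoFUCHSIA create_info = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA, // assumed constant
        .pNext = NULL,
        .collectionToken = token,
    };
    VkBufferCollectionFUCHSIA collection;
    VkResult res = vkCreateBufferCollectionFUCHSIA(device, &create_info, NULL, &collection);
    if (res != VK_SUCCESS)
        return res;

    res = vkSetBufferCollectionBufferConstraintsFUCHSIA(device, collection, constraints);
    if (res == VK_SUCCESS) {
        out_props->sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA; // assumed constant
        out_props->pNext = NULL;
        res = vkGetBufferCollectionPropertiesFUCHSIA(device, collection, out_props);
    }

    vkDestroyBufferCollectionFUCHSIA(device, collection, NULL);
    return res;
}
```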
externals/Vulkan-Headers/registry/generator.py (vendored, 43 changed lines)
@@ -43,10 +43,7 @@ def enquote(s):
    """Return string argument with surrounding quotes,
      for serialization into Python code."""
    if s:
-        if isinstance(s, str):
-            return "'{}'".format(s)
-        else:
-            return s
+        return "'{}'".format(s)
    return None


@@ -122,11 +119,8 @@ class GeneratorOptions:
                 removeExtensions=None,
                 emitExtensions=None,
                 emitSpirv=None,
-                 emitFormats=None,
                 reparentEnums=True,
-                 sortProcedure=regSortFeatures,
-                 requireCommandAliases=False,
-                ):
+                 sortProcedure=regSortFeatures):
        """Constructor.

        Arguments:
@@ -158,8 +152,6 @@ class GeneratorOptions:
          to None.
        - emitSpirv - regex matching names of extensions and capabilities
          to actually emit interfaces for.
-        - emitFormats - regex matching names of formats to actually emit
-          interfaces for.
        - reparentEnums - move <enum> elements which extend an enumerated
          type from <feature> or <extension> elements to the target <enums>
          element. This is required for almost all purposes, but the
@@ -225,10 +217,6 @@ class GeneratorOptions:
        """regex matching names of extensions and capabilities
        to actually emit interfaces for."""

-        self.emitFormats = self.emptyRegex(emitFormats)
-        """regex matching names of formats
-        to actually emit interfaces for."""
-
        self.reparentEnums = reparentEnums
        """boolean specifying whether to remove <enum> elements from
        <feature> or <extension> when extending an <enums> type."""
@@ -242,10 +230,6 @@ class GeneratorOptions:
        self.codeGenerator = False
        """True if this generator makes compilable code"""

-        self.requireCommandAliases = requireCommandAliases
-        """True if alias= attributes of <command> tags are transitively
-        required."""
-
    def emptyRegex(self, pat):
        """Substitute a regular expression which matches no version
        or extension names for None or the empty string."""
@@ -273,17 +257,6 @@ class OutputGenerator:
        'basetype': 'basetypes',
    }

-    def breakName(self, name, msg):
-        """Break into debugger if this is a special name"""
-
-        # List of string names to break on
-        bad = (
-        )
-
-        if name in bad and True:
-            print('breakName {}: {}'.format(name, msg))
-            pdb.set_trace()
-
    def __init__(self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout):
        """Constructor

@@ -580,7 +553,7 @@ class OutputGenerator:
            # Work around this by chasing the aliases to get the actual value.
            while numVal is None:
                alias = self.registry.tree.find("enums/enum[@name='" + strVal + "']")
-                (numVal, strVal) = self.enumToValue(alias, True, bitwidth, True)
+                (numVal, strVal) = self.enumToValue(alias, True)
            decl += "static const {} {} = {};\n".format(flagTypeName, name, strVal)

        if numVal is not None:
@@ -805,6 +778,7 @@ class OutputGenerator:
            self.warnFile.flush()
        if self.diagFile:
            self.diagFile.flush()
+        self.outFile.flush()
        if self.outFile != sys.stdout and self.outFile != sys.stderr:
            self.outFile.close()

@@ -913,14 +887,6 @@ class OutputGenerator:
        Extend to generate as desired in your derived class."""
        return

-    def genFormat(self, format, formatinfo, alias):
-        """Generate interface for a format element.
-
-        - formatinfo - FormatInfo
-
-        Extend to generate as desired in your derived class."""
-        return
-
    def makeProtoName(self, name, tail):
        """Turn a `<proto>` `<name>` into C-language prototype
        and typedef declarations for that name.
@@ -973,9 +939,6 @@ class OutputGenerator:

            # Clear prefix for subsequent iterations
            prefix = ''
-
-            paramdecl = paramdecl + prefix
-
        if aligncol == 0:
            # Squeeze out multiple spaces other than the indentation
            paramdecl = indent + ' '.join(paramdecl.split())
externals/Vulkan-Headers/registry/genvk.py (vendored, 60 changed lines)
@@ -20,7 +20,6 @@ from generator import write
from spirvcapgenerator import SpirvCapabilityOutputGenerator
from hostsyncgenerator import HostSynchronizationOutputGenerator
from pygenerator import PyOutputGenerator
-from rubygenerator import RubyOutputGenerator
from reflib import logDiag, logWarn, setLogFile
from reg import Registry
from validitygenerator import ValidityOutputGenerator
@@ -78,9 +77,6 @@ def makeGenOpts(args):
    # SPIR-V capabilities / features to emit (list of extensions & capabilities)
    emitSpirv = args.emitSpirv

-    # Vulkan Formats to emit
-    emitFormats = args.emitFormats
-
    # Features to include (list of features)
    features = args.feature

@@ -101,14 +97,13 @@ def makeGenOpts(args):

    # Descriptive names for various regexp patterns used to select
    # versions and extensions
-    allFormats = allSpirv = allFeatures = allExtensions = r'.*'
+    allSpirv = allFeatures = allExtensions = r'.*'

    # Turn lists of names/patterns into matching regular expressions
    addExtensionsPat     = makeREstring(extensions, None)
    removeExtensionsPat  = makeREstring(removeExtensions, None)
    emitExtensionsPat    = makeREstring(emitExtensions, allExtensions)
    emitSpirvPat         = makeREstring(emitSpirv, allSpirv)
-    emitFormatsPat       = makeREstring(emitFormats, allFormats)
    featuresPat          = makeREstring(features, allFeatures)

    # Copyright text prefixing all headers (list of strings).
@@ -187,32 +182,7 @@ def makeGenOpts(args):
            reparentEnums     = False)
        ]

-    # Ruby representation of API information, used by scripts that
-    # don't need to load the full XML.
-    genOpts['api.rb'] = [
-        RubyOutputGenerator,
-        DocGeneratorOptions(
-            conventions       = conventions,
-            filename          = 'api.rb',
-            directory         = directory,
-            genpath           = None,
-            apiname           = 'vulkan',
-            profile           = None,
-            versions          = featuresPat,
-            emitversions      = featuresPat,
-            defaultExtensions = None,
-            addExtensions     = addExtensionsPat,
-            removeExtensions  = removeExtensionsPat,
-            emitExtensions    = emitExtensionsPat,
-            reparentEnums     = False)
-        ]
-
    # API validity files for spec
-    #
-    # requireCommandAliases is set to True because we need validity files
-    # for the command something is promoted to even when the promoted-to
-    # feature is not included. This avoids wordy includes of validity files.
    genOpts['validinc'] = [
        ValidityOutputGenerator,
        DocGeneratorOptions(
@@ -227,9 +197,7 @@ def makeGenOpts(args):
            defaultExtensions = None,
            addExtensions     = addExtensionsPat,
            removeExtensions  = removeExtensionsPat,
-            emitExtensions    = emitExtensionsPat,
-            requireCommandAliases = True,
-            )
+            emitExtensions    = emitExtensionsPat)
        ]

    # API host sync table files for spec
@@ -339,7 +307,6 @@ def makeGenOpts(args):
        'VK_EXT_video_decode_h264',
        'VK_EXT_video_decode_h265',
        'VK_EXT_video_encode_h264',
-        'VK_EXT_video_encode_h265',
    ]

    betaSuppressExtensions = []
@@ -347,13 +314,10 @@ def makeGenOpts(args):
    platforms = [
        [ 'vulkan_android.h',     [ 'VK_KHR_android_surface',
                                    'VK_ANDROID_external_memory_android_hardware_buffer'
-                                  ], commonSuppressExtensions +
-             [ 'VK_KHR_format_feature_flags2',
-             ] ],
+                                  ], commonSuppressExtensions ],
        [ 'vulkan_fuchsia.h',     [ 'VK_FUCHSIA_imagepipe_surface',
                                    'VK_FUCHSIA_external_memory',
-                                    'VK_FUCHSIA_external_semaphore',
-                                    'VK_FUCHSIA_buffer_collection' ], commonSuppressExtensions ],
+                                    'VK_FUCHSIA_external_semaphore' ], commonSuppressExtensions ],
        [ 'vulkan_ggp.h',         [ 'VK_GGP_stream_descriptor_surface',
                                    'VK_GGP_frame_token' ], commonSuppressExtensions ],
        [ 'vulkan_ios.h',         [ 'VK_MVK_ios_surface' ], commonSuppressExtensions ],
@@ -582,8 +546,6 @@ def genTarget(args):
        logDiag('* options.addExtensions    =', options.addExtensions)
        logDiag('* options.removeExtensions =', options.removeExtensions)
        logDiag('* options.emitExtensions   =', options.emitExtensions)
-        logDiag('* options.emitSpirv        =', options.emitSpirv)
-        logDiag('* options.emitFormats      =', options.emitFormats)

        gen = createGenerator(errFile=errWarn,
                              warnFile=errWarn,
@@ -616,9 +578,6 @@ if __name__ == '__main__':
    parser.add_argument('-emitSpirv', action='append',
                        default=[],
                        help='Specify a SPIR-V extension or capability to emit in targets')
-    parser.add_argument('-emitFormats', action='append',
-                        default=[],
-                        help='Specify Vulkan Formats to emit in targets')
    parser.add_argument('-feature', action='append',
                        default=[],
                        help='Specify a core API feature name or names to add to targets')
@@ -642,7 +601,7 @@ if __name__ == '__main__':
    parser.add_argument('-time', action='store_true',
                        help='Enable timing')
    parser.add_argument('-validate', action='store_true',
-                        help='Validate the registry properties and exit')
+                        help='Enable XML group validation')
    parser.add_argument('-genpath', action='store', default='gen',
                        help='Path to generated files')
    parser.add_argument('-o', action='store', dest='directory',
@@ -676,12 +635,6 @@ if __name__ == '__main__':
    else:
        diag = None

-    if args.time:
-        # Log diagnostics and warnings
-        setLogFile(setDiag = True, setWarn = True, filename = '-')
-
-    (gen, options) = (None, None)
-    if not args.validate:
    # Create the API generator & generator options
    (gen, options) = genTarget(args)

@@ -700,8 +653,7 @@ if __name__ == '__main__':
    endTimer(args.time, '* Time to parse ElementTree =')

    if args.validate:
-        success = reg.validateRegistry()
-        sys.exit(0 if success else 1)
+        reg.validateGroups()

    if args.dump:
        logDiag('* Dumping registry to regdump.txt')
externals/Vulkan-Headers/registry/reg.py (vendored, 197 changed lines)
@ -260,12 +260,6 @@ class SpirvInfo(BaseInfo):
|
||||||
def __init__(self, elem):
|
def __init__(self, elem):
|
||||||
BaseInfo.__init__(self, elem)
|
BaseInfo.__init__(self, elem)
|
||||||
|
|
||||||
class FormatInfo(BaseInfo):
|
|
||||||
"""Registry information about an API <format>."""
|
|
||||||
|
|
||||||
def __init__(self, elem):
|
|
||||||
BaseInfo.__init__(self, elem)
|
|
||||||
|
|
||||||
class Registry:
|
class Registry:
|
||||||
"""Object representing an API registry, loaded from an XML file."""
|
"""Object representing an API registry, loaded from an XML file."""
|
||||||
|
|
||||||
|
@ -317,9 +311,6 @@ class Registry:
|
||||||
self.spirvcapdict = {}
|
self.spirvcapdict = {}
|
||||||
"dictionary of FeatureInfo objects for `<spirvcapability>` elements keyed by spirv capability name"
|
"dictionary of FeatureInfo objects for `<spirvcapability>` elements keyed by spirv capability name"
|
||||||
|
|
||||||
self.formatsdict = {}
|
|
||||||
"dictionary of FeatureInfo objects for `<format>` elements keyed by VkFormat name"
|
|
||||||
|
|
||||||
self.emitFeatures = False
|
self.emitFeatures = False
|
||||||
"""True to actually emit features for a version / extension,
|
"""True to actually emit features for a version / extension,
|
||||||
or False to just treat them as emitted"""
|
or False to just treat them as emitted"""
|
||||||
|
@ -365,10 +356,10 @@ class Registry:
|
||||||
|
|
||||||
Intended for internal use only.
|
Intended for internal use only.
|
||||||
|
|
||||||
- elem - `<type>`/`<enums>`/`<enum>`/`<command>`/`<feature>`/`<extension>`/`<spirvextension>`/`<spirvcapability>`/`<format>` Element
|
- elem - `<type>`/`<enums>`/`<enum>`/`<command>`/`<feature>`/`<extension>`/`<spirvextension>`/`<spirvcapability>` Element
|
||||||
- info - corresponding {Type|Group|Enum|Cmd|Feature|Spirv}Info object
|
- info - corresponding {Type|Group|Enum|Cmd|Feature|Spirv}Info object
|
||||||
- infoName - 'type' / 'group' / 'enum' / 'command' / 'feature' / 'extension' / 'spirvextension' / 'spirvcapability' / 'format'
|
- infoName - 'type' / 'group' / 'enum' / 'command' / 'feature' / 'extension' / 'spirvextension' / 'spirvcapability'
|
||||||
- dictionary - self.{type|group|enum|cmd|api|ext|format|spirvext|spirvcap}dict
|
- dictionary - self.{type|group|enum|cmd|api|ext|spirvext|spirvcap}dict
|
||||||
|
|
||||||
If the Element has an 'api' attribute, the dictionary key is the
|
If the Element has an 'api' attribute, the dictionary key is the
|
||||||
tuple (name,api). If not, the key is the name. 'name' is an
|
tuple (name,api). If not, the key is the name. 'name' is an
|
||||||
|
@ -621,10 +612,6 @@ class Registry:
|
||||||
spirvInfo = SpirvInfo(spirv)
|
spirvInfo = SpirvInfo(spirv)
|
||||||
self.addElementInfo(spirv, spirvInfo, 'spirvcapability', self.spirvcapdict)
|
self.addElementInfo(spirv, spirvInfo, 'spirvcapability', self.spirvcapdict)
|
||||||
|
|
||||||
for format in self.reg.findall('formats/format'):
|
|
||||||
formatInfo = FormatInfo(format)
|
|
||||||
self.addElementInfo(format, formatInfo, 'format', self.formatsdict)
|
|
||||||
|
|
||||||
def dumpReg(self, maxlen=120, filehandle=sys.stdout):
|
def dumpReg(self, maxlen=120, filehandle=sys.stdout):
|
||||||
"""Dump all the dictionaries constructed from the Registry object.
|
"""Dump all the dictionaries constructed from the Registry object.
|
||||||
|
|
||||||
|
@ -664,10 +651,6 @@ class Registry:
|
||||||
for key in self.spirvcapdict:
|
for key in self.spirvcapdict:
|
||||||
write(' SPIR-V Capability', key, '->',
|
write(' SPIR-V Capability', key, '->',
|
||||||
etree.tostring(self.spirvcapdict[key].elem)[0:maxlen], file=filehandle)
|
etree.tostring(self.spirvcapdict[key].elem)[0:maxlen], file=filehandle)
|
||||||
write('// VkFormat', file=filehandle)
|
|
||||||
for key in self.formatsdict:
|
|
||||||
write(' VkFormat', key, '->',
|
|
||||||
etree.tostring(self.formatsdict[key].elem)[0:maxlen], file=filehandle)
|
|
||||||
|
|
||||||
def markTypeRequired(self, typename, required):
|
def markTypeRequired(self, typename, required):
|
||||||
"""Require (along with its dependencies) or remove (but not its dependencies) a type.
|
"""Require (along with its dependencies) or remove (but not its dependencies) a type.
|
||||||
|
@ -779,23 +762,12 @@ class Registry:
|
||||||
cmd = self.lookupElementInfo(cmdname, self.cmddict)
|
cmd = self.lookupElementInfo(cmdname, self.cmddict)
|
||||||
if cmd is not None:
|
if cmd is not None:
|
||||||
cmd.required = required
|
cmd.required = required
|
||||||
|
|
||||||
# Tag command dependencies in 'alias' attribute as required
|
# Tag command dependencies in 'alias' attribute as required
|
||||||
#
|
|
||||||
# This is usually not done, because command 'aliases' are not
|
|
||||||
# actual C language aliases like type and enum aliases. Instead
|
|
||||||
# they are just duplicates of the function signature of the
|
|
||||||
# alias. This means that there is no dependency of a command
|
|
||||||
# alias on what it aliases. One exception is validity includes,
|
|
||||||
# where the spec markup needs the promoted-to validity include
|
|
||||||
# even if only the promoted-from command is being built.
|
|
||||||
if self.genOpts.requireCommandAliases:
|
|
||||||
depname = cmd.elem.get('alias')
|
depname = cmd.elem.get('alias')
|
||||||
if depname:
|
if depname:
|
||||||
self.gen.logMsg('diag', 'Generating dependent command',
|
self.gen.logMsg('diag', 'Generating dependent command',
|
||||||
depname, 'for alias', cmdname)
|
depname, 'for alias', cmdname)
|
||||||
self.markCmdRequired(depname, required)
|
self.markCmdRequired(depname, required)
|
||||||
|
|
||||||
# Tag all parameter types of this command as required.
|
# Tag all parameter types of this command as required.
|
||||||
# This DOES NOT remove types of commands in a <remove>
|
# This DOES NOT remove types of commands in a <remove>
|
||||||
# tag, because many other commands may use the same type.
|
# tag, because many other commands may use the same type.
|
||||||
|
@ -870,13 +842,8 @@ class Registry:
|
||||||
- require - `<require>` block from the registry
|
- require - `<require>` block from the registry
|
||||||
- tag - tag to look for in the require block"""
|
- tag - tag to look for in the require block"""
|
||||||
|
|
||||||
# For the time being, the code below is bypassed. It has the effect
|
if alias and require.findall(tag + "[@name='" + alias + "']"):
|
||||||
# of excluding "spelling aliases" created to comply with the style
|
return True
|
||||||
# guide, but this leaves references out of the specification and
|
|
||||||
# causes broken internal links.
|
|
||||||
#
|
|
||||||
# if alias and require.findall(tag + "[@name='" + alias + "']"):
|
|
||||||
# return True
|
|
||||||
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
@ -935,9 +902,6 @@ class Registry:
|
||||||
if not typeextends in self.gen.featureDictionary[featurename][typecat][required_key]:
|
if not typeextends in self.gen.featureDictionary[featurename][typecat][required_key]:
|
||||||
self.gen.featureDictionary[featurename][typecat][required_key][typeextends] = []
|
self.gen.featureDictionary[featurename][typecat][required_key][typeextends] = []
|
||||||
self.gen.featureDictionary[featurename][typecat][required_key][typeextends].append(typename)
|
self.gen.featureDictionary[featurename][typecat][required_key][typeextends].append(typename)
|
||||||
else:
|
|
||||||
self.gen.logMsg('warn', 'fillFeatureDictionary: NOT filling for {}'.format(typename))
|
|
||||||
|
|
||||||
|
|
||||||
for enumElem in require.findall('enum'):
|
for enumElem in require.findall('enum'):
|
||||||
enumname = enumElem.get('name')
|
enumname = enumElem.get('name')
|
||||||
|
@ -952,18 +916,16 @@ class Registry:
|
||||||
if not enumextends in self.gen.featureDictionary[featurename]['enumconstant'][required_key]:
|
if not enumextends in self.gen.featureDictionary[featurename]['enumconstant'][required_key]:
|
||||||
self.gen.featureDictionary[featurename]['enumconstant'][required_key][enumextends] = []
|
self.gen.featureDictionary[featurename]['enumconstant'][required_key][enumextends] = []
|
||||||
self.gen.featureDictionary[featurename]['enumconstant'][required_key][enumextends].append(enumname)
|
self.gen.featureDictionary[featurename]['enumconstant'][required_key][enumextends].append(enumname)
|
||||||
else:
|
|
||||||
self.gen.logMsg('warn', 'fillFeatureDictionary: NOT filling for {}'.format(typename))
|
|
||||||
|
|
||||||
for cmdElem in require.findall('command'):
|
for cmdElem in require.findall('command'):
|
||||||
|
|
||||||
# Remove aliases in the same extension/feature; these are always added as a correction. Don't need the original to be visible.
|
# Remove aliases in the same extension/feature; these are always added as a correction. Don't need the original to be visible.
|
||||||
alias = self.getAlias(cmdElem, self.cmddict)
|
alias = self.getAlias(cmdElem, self.cmddict)
|
||||||
if not self.checkForCorrectionAliases(alias, require, 'command'):
|
if not self.checkForCorrectionAliases(alias, require, 'command'):
|
||||||
if not required_key in self.gen.featureDictionary[featurename]['command']:
|
if not required_key in self.gen.featureDictionary[featurename]['command']:
|
||||||
self.gen.featureDictionary[featurename]['command'][required_key] = []
|
self.gen.featureDictionary[featurename]['command'][required_key] = []
|
||||||
self.gen.featureDictionary[featurename]['command'][required_key].append(cmdElem.get('name'))
|
self.gen.featureDictionary[featurename]['command'][required_key].append(cmdElem.get('name'))
|
||||||
else:
|
|
||||||
self.gen.logMsg('warn', 'fillFeatureDictionary: NOT filling for {}'.format(typename))
|
|
||||||
|
|
||||||
def requireAndRemoveFeatures(self, interface, featurename, api, profile):
|
def requireAndRemoveFeatures(self, interface, featurename, api, profile):
|
||||||
"""Process `<require>` and `<remove>` tags for a `<version>` or `<extension>`.
|
"""Process `<require>` and `<remove>` tags for a `<version>` or `<extension>`.
|
||||||
|
@ -973,12 +935,10 @@ class Registry:
|
||||||
- featurename - name of the feature
|
- featurename - name of the feature
|
||||||
- api - string specifying API name being generated
|
- api - string specifying API name being generated
|
||||||
- profile - string specifying API profile being generated"""
|
- profile - string specifying API profile being generated"""
|
||||||
|
|
||||||
# <require> marks things that are required by this version/profile
|
# <require> marks things that are required by this version/profile
|
||||||
for feature in interface.findall('require'):
|
for feature in interface.findall('require'):
|
||||||
if matchAPIProfile(api, profile, feature):
|
if matchAPIProfile(api, profile, feature):
|
||||||
self.markRequired(featurename, feature, True)
|
self.markRequired(featurename, feature, True)
|
||||||
|
|
||||||
# <remove> marks things that are removed by this version/profile
|
# <remove> marks things that are removed by this version/profile
|
||||||
for feature in interface.findall('remove'):
|
for feature in interface.findall('remove'):
|
||||||
if matchAPIProfile(api, profile, feature):
|
if matchAPIProfile(api, profile, feature):
|
||||||
|
@ -1207,45 +1167,6 @@ class Registry:
|
||||||
genProc = self.gen.genSpirv
|
genProc = self.gen.genSpirv
|
||||||
genProc(spirv, name, alias)
|
genProc(spirv, name, alias)
|
||||||
|
|
||||||
def stripUnsupportedAPIs(self, dictionary, attribute, supportedDictionary):
|
|
||||||
"""Strip unsupported APIs from attributes of APIs.
|
|
||||||
dictionary - *Info dictionary of APIs to be updated
|
|
||||||
attribute - attribute name to look for in each API
|
|
||||||
supportedDictionary - dictionary in which to look for supported
|
|
||||||
API elements in the attribute"""
|
|
||||||
|
|
||||||
for key in dictionary:
|
|
||||||
eleminfo = dictionary[key]
|
|
||||||
attribstring = eleminfo.elem.get(attribute)
|
|
||||||
if attribstring is not None:
|
|
||||||
apis = []
|
|
||||||
stripped = False
|
|
||||||
for api in attribstring.split(','):
|
|
||||||
##print('Checking API {} referenced by {}'.format(api, key))
|
|
||||||
if supportedDictionary[api].required:
|
|
||||||
apis.append(api)
|
|
||||||
else:
|
|
||||||
stripped = True
|
|
||||||
##print('\t**STRIPPING API {} from {}'.format(api, key))
|
|
||||||
|
|
||||||
# Update the attribute after stripping stuff.
|
|
||||||
# Could sort apis before joining, but it's not a clear win
|
|
||||||
if stripped:
|
|
||||||
eleminfo.elem.set(attribute, ','.join(apis))
|
|
||||||
|
|
||||||
def generateFormat(self, format, dictionary):
|
|
||||||
if format is None:
|
|
||||||
self.gen.logMsg('diag', 'No entry found for format element',
|
|
||||||
'returning!')
|
|
||||||
return
|
|
||||||
|
|
||||||
name = format.elem.get('name')
|
|
||||||
# No known alias for VkFormat elements
|
|
||||||
alias = None
|
|
||||||
if format.emit:
|
|
||||||
genProc = self.gen.genFormat
|
|
||||||
genProc(format, name, alias)
|
|
||||||
|
|
||||||
def apiGen(self):
|
def apiGen(self):
|
||||||
"""Generate interface for specified versions using the current
|
"""Generate interface for specified versions using the current
|
||||||
generator and generator options"""
|
generator and generator options"""
|
||||||
|
@@ -1256,13 +1177,8 @@ class Registry:
                         'profile:', self.genOpts.profile)
         self.gen.logMsg('diag', '*******************************************')
 
-        # Could reset required/declared flags for all features here.
-        # This has been removed as never used. The initial motivation was
-        # the idea of calling apiGen() repeatedly for different targets, but
-        # this has never been done. The 20% or so build-time speedup that
-        # might result is not worth the effort to make it actually work.
-        #
-        #@@ self.apiReset()
+        # Reset required/declared flags for all features
+        self.apiReset()
 
         # Compile regexps used to select versions & extensions
         regVersions = re.compile(self.genOpts.versions)
|
||||||
|
@@ -1271,7 +1187,6 @@ class Registry:
         regRemoveExtensions = re.compile(self.genOpts.removeExtensions)
         regEmitExtensions = re.compile(self.genOpts.emitExtensions)
         regEmitSpirv = re.compile(self.genOpts.emitSpirv)
-        regEmitFormats = re.compile(self.genOpts.emitFormats)
 
         # Get all matching API feature names & add to list of FeatureInfo
         # Note we used to select on feature version attributes, not names.
|
||||||
|
@ -1380,12 +1295,6 @@ class Registry:
|
||||||
si.emit = (regEmitSpirv.match(key) is not None)
|
si.emit = (regEmitSpirv.match(key) is not None)
|
||||||
spirvcaps.append(si)
|
spirvcaps.append(si)
|
||||||
|
|
||||||
formats = []
|
|
||||||
for key in self.formatsdict:
|
|
||||||
si = self.formatsdict[key]
|
|
||||||
si.emit = (regEmitFormats.match(key) is not None)
|
|
||||||
formats.append(si)
|
|
||||||
|
|
||||||
# Sort the features list, if a sort procedure is defined
|
# Sort the features list, if a sort procedure is defined
|
||||||
if self.genOpts.sortProcedure:
|
if self.genOpts.sortProcedure:
|
||||||
self.genOpts.sortProcedure(features)
|
self.genOpts.sortProcedure(features)
|
||||||
|
@ -1407,21 +1316,6 @@ class Registry:
|
||||||
self.requireAndRemoveFeatures(f.elem, f.name, self.genOpts.apiname, self.genOpts.profile)
|
self.requireAndRemoveFeatures(f.elem, f.name, self.genOpts.apiname, self.genOpts.profile)
|
||||||
self.assignAdditionalValidity(f.elem, self.genOpts.apiname, self.genOpts.profile)
|
self.assignAdditionalValidity(f.elem, self.genOpts.apiname, self.genOpts.profile)
|
||||||
|
|
||||||
# Now, strip references to APIs that are not required.
|
|
||||||
# At present such references may occur in:
|
|
||||||
# Structs in <type category="struct"> 'structextends' attributes
|
|
||||||
# Enums in <command> 'successcodes' and 'errorcodes' attributes
|
|
||||||
self.stripUnsupportedAPIs(self.typedict, 'structextends', self.typedict)
|
|
||||||
self.stripUnsupportedAPIs(self.cmddict, 'successcodes', self.enumdict)
|
|
||||||
self.stripUnsupportedAPIs(self.cmddict, 'errorcodes', self.enumdict)
|
|
||||||
|
|
||||||
# @@May need to strip <spirvcapability> / <spirvextension> <enable>
|
|
||||||
# tags of these forms:
|
|
||||||
# <enable version="VK_API_VERSION_1_0"/>
|
|
||||||
# <enable struct="VkPhysicalDeviceFeatures" feature="geometryShader" requires="VK_VERSION_1_0"/>
|
|
||||||
# <enable extension="VK_KHR_shader_draw_parameters"/>
|
|
||||||
# <enable property="VkPhysicalDeviceVulkan12Properties" member="shaderDenormPreserveFloat16" value="VK_TRUE" requires="VK_VERSION_1_2,VK_KHR_shader_float_controls"/>
|
|
||||||
|
|
||||||
# Pass 2: loop over specified API versions and extensions printing
|
# Pass 2: loop over specified API versions and extensions printing
|
||||||
# declarations for required things which haven't already been
|
# declarations for required things which haven't already been
|
||||||
# generated.
|
# generated.
|
||||||
|
@ -1444,8 +1338,6 @@ class Registry:
|
||||||
self.generateSpirv(s, self.spirvextdict)
|
self.generateSpirv(s, self.spirvextdict)
|
||||||
for s in spirvcaps:
|
for s in spirvcaps:
|
||||||
self.generateSpirv(s, self.spirvcapdict)
|
self.generateSpirv(s, self.spirvcapdict)
|
||||||
for s in formats:
|
|
||||||
self.generateFormat(s, self.formatsdict)
|
|
||||||
self.gen.endFile()
|
self.gen.endFile()
|
||||||
|
|
||||||
def apiReset(self):
|
def apiReset(self):
|
||||||
|
@ -1461,44 +1353,39 @@ class Registry:
|
||||||
for cmd in self.apidict:
|
for cmd in self.apidict:
|
||||||
self.apidict[cmd].resetState()
|
self.apidict[cmd].resetState()
|
||||||
|
|
||||||
def __validateStructLimittypes(self, struct):
|
def validateGroups(self):
|
||||||
"""Validate 'limittype' attributes for a single struct."""
|
"""Validate `group=` attributes on `<param>` and `<proto>` tags.
|
||||||
limittypeDiags = namedtuple('limittypeDiags', ['missing', 'invalid'])
|
|
||||||
badFields = defaultdict(lambda : limittypeDiags(missing=[], invalid=[]))
|
|
||||||
validLimittypes = { 'min', 'max', 'bitmask', 'range', 'struct', 'noauto' }
|
|
||||||
for member in struct.getMembers():
|
|
||||||
memberName = member.findtext('name')
|
|
||||||
if memberName in ['sType', 'pNext']:
|
|
||||||
continue
|
|
||||||
limittype = member.get('limittype')
|
|
||||||
if not limittype:
|
|
||||||
badFields[struct.elem.get('name')].missing.append(memberName)
|
|
||||||
elif limittype == 'struct':
|
|
||||||
typeName = member.findtext('type')
|
|
||||||
memberType = self.typedict[typeName]
|
|
||||||
badFields.update(self.__validateStructLimittypes(memberType))
|
|
||||||
elif limittype not in validLimittypes:
|
|
||||||
badFields[struct.elem.get('name')].invalid.append(memberName)
|
|
||||||
return badFields
|
|
||||||
|
|
||||||
def __validateLimittype(self):
|
Check that `group=` attributes match actual groups"""
|
||||||
"""Validate 'limittype' attributes."""
|
# Keep track of group names not in <group> tags
|
||||||
badFields = self.__validateStructLimittypes(self.typedict['VkPhysicalDeviceProperties2'])
|
badGroup = {}
|
||||||
for featStructName in self.validextensionstructs['VkPhysicalDeviceProperties2']:
|
self.gen.logMsg('diag', 'VALIDATING GROUP ATTRIBUTES')
|
||||||
featStruct = self.typedict[featStructName]
|
for cmd in self.reg.findall('commands/command'):
|
||||||
badFields.update(self.__validateStructLimittypes(featStruct))
|
proto = cmd.find('proto')
|
||||||
|
# funcname = cmd.find('proto/name').text
|
||||||
|
group = proto.get('group')
|
||||||
|
if group is not None and group not in self.groupdict:
|
||||||
|
# self.gen.logMsg('diag', '*** Command ', funcname, ' has UNKNOWN return group ', group)
|
||||||
|
if group not in badGroup:
|
||||||
|
badGroup[group] = 1
|
||||||
|
else:
|
||||||
|
badGroup[group] = badGroup[group] + 1
|
||||||
|
|
||||||
if badFields:
|
for param in cmd.findall('param'):
|
||||||
self.gen.logMsg('diag', 'SUMMARY OF FIELDS WITH INCORRECT LIMITTYPES')
|
pname = param.find('name')
|
||||||
for key in sorted(badFields.keys()):
|
if pname is not None:
|
||||||
diags = badFields[key]
|
pname = pname.text
|
||||||
if diags.missing:
|
else:
|
||||||
self.gen.logMsg('diag', ' ', key, 'missing limittype:', ', '.join(badFields[key].missing))
|
pname = param.get('name')
|
||||||
if diags.invalid:
|
group = param.get('group')
|
||||||
self.gen.logMsg('diag', ' ', key, 'invalid limittype:', ', '.join(badFields[key].invalid))
|
if group is not None and group not in self.groupdict:
|
||||||
return False
|
# self.gen.logMsg('diag', '*** Command ', funcname, ' param ', pname, ' has UNKNOWN group ', group)
|
||||||
return True
|
if group not in badGroup:
|
||||||
|
badGroup[group] = 1
|
||||||
|
else:
|
||||||
|
badGroup[group] = badGroup[group] + 1
|
||||||
|
|
||||||
def validateRegistry(self):
|
if badGroup:
|
||||||
"""Validate properties of the registry."""
|
self.gen.logMsg('diag', 'SUMMARY OF UNRECOGNIZED GROUPS')
|
||||||
return self.__validateLimittype()
|
for key in sorted(badGroup.keys()):
|
||||||
|
self.gen.logMsg('diag', ' ', key, ' occurred ', badGroup[key], ' times')
|
||||||
|
|
8028 changed lines: externals/Vulkan-Headers/registry/validusage.json (vendored; diff suppressed because one or more lines are too long)
3400 changed lines: externals/Vulkan-Headers/registry/vk.xml (vendored; diff suppressed because it is too large)
|
@@ -31,7 +31,6 @@ SPECIAL_WORDS = set((
     'Int64',  # VkPhysicalDeviceShaderAtomicInt64FeaturesKHR
     'Int8',  # VkPhysicalDeviceShaderFloat16Int8FeaturesKHR
     'MacOS',  # VkMacOSSurfaceCreateInfoMVK
-    'RGBA10X6',  # VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT
     'Uint8',  # VkPhysicalDeviceIndexTypeUint8FeaturesEXT
     'Win32',  # VkWin32SurfaceCreateInfoKHR
 ))
|
||||||
|
|
|
@@ -62,8 +62,7 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
 void SetCurrentThreadPriority(ThreadPriority new_priority) {
     pthread_t this_thread = pthread_self();
 
-    const auto scheduling_type =
-        new_priority != ThreadPriority::Critical ? SCHED_OTHER : SCHED_FIFO;
+    const auto scheduling_type = SCHED_OTHER;
     s32 max_prio = sched_get_priority_max(scheduling_type);
     s32 min_prio = sched_get_priority_min(scheduling_type);
     u32 level = std::max(static_cast<u32>(new_priority) + 1, 4U);
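For context, a minimal standalone sketch of the priority mapping this hunk touches, assuming a Linux/pthread host; the ThreadPriority values and the final interpolation formula are illustrative assumptions, not the emulator's exact definitions.

```cpp
#include <algorithm>
#include <pthread.h>
#include <sched.h>

// Illustrative priority levels; the emulator's real enum may differ.
enum class ThreadPriority : unsigned { Low = 0, Normal = 1, High = 2, VeryHigh = 3, Critical = 4 };

void SetCurrentThreadPrioritySketch(ThreadPriority new_priority) {
    pthread_t this_thread = pthread_self();

    // As in the change above, always use the default policy instead of
    // promoting Critical threads to SCHED_FIFO.
    const int scheduling_type = SCHED_OTHER;
    const int max_prio = sched_get_priority_max(scheduling_type);
    const int min_prio = sched_get_priority_min(scheduling_type);
    const unsigned level = std::max(static_cast<unsigned>(new_priority) + 1, 4U);

    // Scale the abstract level onto the host's priority range (assumed formula).
    sched_param params{};
    params.sched_priority =
        min_prio + static_cast<int>((max_prio - min_prio) * (static_cast<float>(level) / 7.0f));
    pthread_setschedparam(this_thread, scheduling_type, &params);
}
```

Under SCHED_OTHER the min and max priorities are usually equal, so the interpolation collapses to a no-op; the mapping only matters for real-time policies.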
|
||||||
|
|
|
@@ -8,6 +8,7 @@
 #include <mutex>
 #include <thread>
 
+#include "common/atomic_ops.h"
 #include "common/uint128.h"
 #include "common/x64/native_clock.h"
|
||||||
|
|
||||||
|
|
|
@@ -185,6 +185,7 @@ add_library(core STATIC
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
+    hle/kernel/k_light_condition_variable.cpp
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
     hle/kernel/k_light_lock.h
@@ -237,6 +238,7 @@ add_library(core STATIC
     hle/kernel/k_system_control.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
+    hle/kernel/k_thread_queue.cpp
     hle/kernel/k_thread_queue.h
     hle/kernel/k_trace.h
     hle/kernel/k_transfer_memory.cpp
|
||||||
|
|
|
@@ -521,12 +521,6 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }
 
-std::size_t System::CurrentCoreIndex() const {
-    std::size_t core = impl->kernel.GetCurrentHostThreadID();
-    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
-    return core;
-}
-
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }
|
||||||
|
|
|
@@ -208,9 +208,6 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;
 
-    /// Gets the index of the currently running CPU core
-    [[nodiscard]] std::size_t CurrentCoreIndex() const;
-
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
|
||||||
|
|
||||||
|
|
|
@@ -230,6 +230,7 @@ std::optional<s64> CoreTiming::Advance() {
         event_mutex.unlock();
 
         if (const auto event_type{evt.type.lock()}) {
+            std::unique_lock<std::mutex> lk(event_type->guard);
             event_type->callback(evt.user_data, std::chrono::nanoseconds{static_cast<s64>(
                                                     GetGlobalTimeNs().count() - evt.time)});
         }
@@ -33,6 +33,7 @@ struct EventType {
     TimedCallback callback;
     /// A pointer to the name of the event.
     const std::string name;
+    mutable std::mutex guard;
 };
 
 /**
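A small sketch of the locking pattern these two hunks introduce: each event type carries its own mutex so that firing the callback cannot race with code that unschedules or tears the event down. The type and function names below are illustrative, not yuzu's actual API.

```cpp
#include <chrono>
#include <functional>
#include <mutex>

struct EventTypeSketch {
    std::function<void(std::chrono::nanoseconds)> callback;
    /// Serializes callback execution against cancellation/teardown.
    mutable std::mutex guard;
};

void FireEvent(EventTypeSketch& event, std::chrono::nanoseconds late_by) {
    std::unique_lock lk(event.guard); // held for the whole callback, as in the hunk above
    if (event.callback) {
        event.callback(late_by);
    }
}
```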
|
||||||
|
|
|
@@ -117,17 +117,18 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
+        {
+            Kernel::KScopedDisableDispatch dd(kernel);
             physical_core->ArmInterface().ClearExclusiveState();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        }
     }
 }
 
 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        auto& physical_core = kernel.CurrentPhysicalCore();
-        physical_core.Idle();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        Kernel::KScopedDisableDispatch dd(kernel);
+        kernel.CurrentPhysicalCore().Idle();
     }
 }
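The braces added above exist only to scope a KScopedDisableDispatch guard. A hedged sketch of what such a guard generally looks like follows; the KernelLike interface is an assumption for illustration, not yuzu's real class.

```cpp
#include <cstdint>

// Stand-in for the kernel object the guard operates on (assumed interface).
struct KernelLike {
    std::int32_t disable_count = 0;
    void DisableDispatch() { ++disable_count; }
    void EnableDispatch() { --disable_count; }
};

// RAII guard: dispatch is disabled for exactly the lifetime of the object.
class ScopedDisableDispatchSketch {
public:
    explicit ScopedDisableDispatchSketch(KernelLike& kernel_) : kernel{kernel_} {
        kernel.DisableDispatch();
    }
    ~ScopedDisableDispatchSketch() {
        // Re-enabling dispatch is where a real kernel would honor any
        // reschedule requested while dispatch was disabled.
        kernel.EnableDispatch();
    }
    ScopedDisableDispatchSketch(const ScopedDisableDispatchSketch&) = delete;
    ScopedDisableDispatchSketch& operator=(const ScopedDisableDispatchSketch&) = delete;

private:
    KernelLike& kernel;
};
```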
|
||||||
|
|
||||||
|
@@ -135,12 +136,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.GetCurrentHostThreadID();
+        auto core = kernel.CurrentPhysicalCoreIndex();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.GetCurrentHostThreadID());
+        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
         scheduler.RescheduleCurrentCore();
     }
 }
|
||||||
|
@@ -346,13 +347,9 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
         sc_sync_first_use = false;
     }
 
-    // Abort if emulation was killed before the session really starts
-    if (!system.IsPoweredOn()) {
-        return;
-    }
-
+    // Emulation was stopped
     if (stop_token.stop_requested()) {
-        break;
+        return;
     }
 
     auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
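The hunk above leans entirely on std::stop_token for shutdown instead of a separate powered-on check. A minimal, self-contained sketch of that pattern, with illustrative names:

```cpp
#include <chrono>
#include <cstddef>
#include <stop_token>
#include <thread>

void RunWorker(std::stop_token stop_token, [[maybe_unused]] std::size_t core) {
    while (true) {
        if (stop_token.stop_requested()) {
            return; // mirrors the `return` that replaced `break` above
        }
        // ... one unit of per-core work would go here ...
        std::this_thread::sleep_for(std::chrono::milliseconds{1});
    }
}

int main() {
    // jthread passes its internal stop_token as the first argument.
    std::jthread worker(RunWorker, std::size_t{0});
    std::this_thread::sleep_for(std::chrono::milliseconds{10});
    // On destruction, jthread requests stop and joins automatically.
}
```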
|
||||||
|
|
|
@ -8,6 +8,7 @@
|
||||||
#include "core/hle/kernel/k_scheduler.h"
|
#include "core/hle/kernel/k_scheduler.h"
|
||||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||||
#include "core/hle/kernel/k_thread.h"
|
#include "core/hle/kernel/k_thread.h"
|
||||||
|
#include "core/hle/kernel/k_thread_queue.h"
|
||||||
#include "core/hle/kernel/kernel.h"
|
#include "core/hle/kernel/kernel.h"
|
||||||
#include "core/hle/kernel/svc_results.h"
|
#include "core/hle/kernel/svc_results.h"
|
||||||
#include "core/hle/kernel/time_manager.h"
|
#include "core/hle/kernel/time_manager.h"
|
||||||
|
@ -28,7 +29,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
|
||||||
|
|
||||||
bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
|
bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
|
||||||
auto& monitor = system.Monitor();
|
auto& monitor = system.Monitor();
|
||||||
const auto current_core = system.CurrentCoreIndex();
|
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
|
||||||
|
|
||||||
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||||
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||||
|
@ -58,7 +59,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
|
||||||
|
|
||||||
bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
|
bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
|
||||||
auto& monitor = system.Monitor();
|
auto& monitor = system.Monitor();
|
||||||
const auto current_core = system.CurrentCoreIndex();
|
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
|
||||||
|
|
||||||
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||||
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||||
|
@ -85,6 +86,27 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
|
||||||
|
public:
|
||||||
|
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
|
||||||
|
: KThreadQueue(kernel_), m_tree(t) {}
|
||||||
|
|
||||||
|
virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
||||||
|
bool cancel_timer_task) override {
|
||||||
|
// If the thread is waiting on an address arbiter, remove it from the tree.
|
||||||
|
if (waiting_thread->IsWaitingForAddressArbiter()) {
|
||||||
|
m_tree->erase(m_tree->iterator_to(*waiting_thread));
|
||||||
|
waiting_thread->ClearAddressArbiter();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invoke the base cancel wait handler.
|
||||||
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
KAddressArbiter::ThreadTree* m_tree;
|
||||||
|
};
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
|
ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
|
||||||
|
@ -96,14 +118,14 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
|
||||||
auto it = thread_tree.nfind_light({addr, -1});
|
auto it = thread_tree.nfind_light({addr, -1});
|
||||||
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||||
(it->GetAddressArbiterKey() == addr)) {
|
(it->GetAddressArbiterKey() == addr)) {
|
||||||
|
// End the thread's wait.
|
||||||
KThread* target_thread = std::addressof(*it);
|
KThread* target_thread = std::addressof(*it);
|
||||||
target_thread->SetSyncedObject(nullptr, ResultSuccess);
|
target_thread->EndWait(ResultSuccess);
|
||||||
|
|
||||||
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||||
target_thread->Wakeup();
|
target_thread->ClearAddressArbiter();
|
||||||
|
|
||||||
it = thread_tree.erase(it);
|
it = thread_tree.erase(it);
|
||||||
target_thread->ClearAddressArbiter();
|
|
||||||
++num_waiters;
|
++num_waiters;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -129,14 +151,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
|
||||||
auto it = thread_tree.nfind_light({addr, -1});
|
auto it = thread_tree.nfind_light({addr, -1});
|
||||||
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||||
(it->GetAddressArbiterKey() == addr)) {
|
(it->GetAddressArbiterKey() == addr)) {
|
||||||
|
// End the thread's wait.
|
||||||
KThread* target_thread = std::addressof(*it);
|
KThread* target_thread = std::addressof(*it);
|
||||||
target_thread->SetSyncedObject(nullptr, ResultSuccess);
|
target_thread->EndWait(ResultSuccess);
|
||||||
|
|
||||||
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||||
target_thread->Wakeup();
|
target_thread->ClearAddressArbiter();
|
||||||
|
|
||||||
it = thread_tree.erase(it);
|
it = thread_tree.erase(it);
|
||||||
target_thread->ClearAddressArbiter();
|
|
||||||
++num_waiters;
|
++num_waiters;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -197,14 +219,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
|
||||||
|
|
||||||
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||||
(it->GetAddressArbiterKey() == addr)) {
|
(it->GetAddressArbiterKey() == addr)) {
|
||||||
|
// End the thread's wait.
|
||||||
KThread* target_thread = std::addressof(*it);
|
KThread* target_thread = std::addressof(*it);
|
||||||
target_thread->SetSyncedObject(nullptr, ResultSuccess);
|
target_thread->EndWait(ResultSuccess);
|
||||||
|
|
||||||
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||||
target_thread->Wakeup();
|
target_thread->ClearAddressArbiter();
|
||||||
|
|
||||||
it = thread_tree.erase(it);
|
it = thread_tree.erase(it);
|
||||||
target_thread->ClearAddressArbiter();
|
|
||||||
++num_waiters;
|
++num_waiters;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -214,6 +236,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
|
||||||
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
|
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
|
||||||
// Prepare to wait.
|
// Prepare to wait.
|
||||||
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||||
|
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
|
||||||
|
|
||||||
{
|
{
|
||||||
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
|
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
|
||||||
|
@ -224,9 +247,6 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
|
||||||
return ResultTerminationRequested;
|
return ResultTerminationRequested;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the synced object.
|
|
||||||
cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
|
|
||||||
|
|
||||||
// Read the value from userspace.
|
// Read the value from userspace.
|
||||||
s32 user_value{};
|
s32 user_value{};
|
||||||
bool succeeded{};
|
bool succeeded{};
|
||||||
|
@ -256,31 +276,20 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
|
||||||
// Set the arbiter.
|
// Set the arbiter.
|
||||||
cur_thread->SetAddressArbiter(&thread_tree, addr);
|
cur_thread->SetAddressArbiter(&thread_tree, addr);
|
||||||
thread_tree.insert(*cur_thread);
|
thread_tree.insert(*cur_thread);
|
||||||
cur_thread->SetState(ThreadState::Waiting);
|
|
||||||
|
// Wait for the thread to finish.
|
||||||
|
cur_thread->BeginWait(std::addressof(wait_queue));
|
||||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cancel the timer wait.
|
|
||||||
kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
|
|
||||||
|
|
||||||
// Remove from the address arbiter.
|
|
||||||
{
|
|
||||||
KScopedSchedulerLock sl(kernel);
|
|
||||||
|
|
||||||
if (cur_thread->IsWaitingForAddressArbiter()) {
|
|
||||||
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
|
||||||
cur_thread->ClearAddressArbiter();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the result.
|
// Get the result.
|
||||||
KSynchronizationObject* dummy{};
|
return cur_thread->GetWaitResult();
|
||||||
return cur_thread->GetWaitResult(&dummy);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
||||||
// Prepare to wait.
|
// Prepare to wait.
|
||||||
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||||
|
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
|
||||||
|
|
||||||
{
|
{
|
||||||
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
|
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
|
||||||
|
@ -291,9 +300,6 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
||||||
return ResultTerminationRequested;
|
return ResultTerminationRequested;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the synced object.
|
|
||||||
cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
|
|
||||||
|
|
||||||
// Read the value from userspace.
|
// Read the value from userspace.
|
||||||
s32 user_value{};
|
s32 user_value{};
|
||||||
if (!ReadFromUser(system, &user_value, addr)) {
|
if (!ReadFromUser(system, &user_value, addr)) {
|
||||||
|
@ -316,26 +322,14 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
||||||
// Set the arbiter.
|
// Set the arbiter.
|
||||||
cur_thread->SetAddressArbiter(&thread_tree, addr);
|
cur_thread->SetAddressArbiter(&thread_tree, addr);
|
||||||
thread_tree.insert(*cur_thread);
|
thread_tree.insert(*cur_thread);
|
||||||
cur_thread->SetState(ThreadState::Waiting);
|
|
||||||
|
// Wait for the thread to finish.
|
||||||
|
cur_thread->BeginWait(std::addressof(wait_queue));
|
||||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cancel the timer wait.
|
|
||||||
kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
|
|
||||||
|
|
||||||
// Remove from the address arbiter.
|
|
||||||
{
|
|
||||||
KScopedSchedulerLock sl(kernel);
|
|
||||||
|
|
||||||
if (cur_thread->IsWaitingForAddressArbiter()) {
|
|
||||||
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
|
||||||
cur_thread->ClearAddressArbiter();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the result.
|
// Get the result.
|
||||||
KSynchronizationObject* dummy{};
|
return cur_thread->GetWaitResult();
|
||||||
return cur_thread->GetWaitResult(&dummy);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace Kernel
|
} // namespace Kernel
|
||||||
|
|
|
@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
|
||||||
|
|
|
@ -11,6 +11,7 @@
|
||||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||||
#include "core/hle/kernel/k_synchronization_object.h"
|
#include "core/hle/kernel/k_synchronization_object.h"
|
||||||
#include "core/hle/kernel/k_thread.h"
|
#include "core/hle/kernel/k_thread.h"
|
||||||
|
#include "core/hle/kernel/k_thread_queue.h"
|
||||||
#include "core/hle/kernel/kernel.h"
|
#include "core/hle/kernel/kernel.h"
|
||||||
#include "core/hle/kernel/svc_common.h"
|
#include "core/hle/kernel/svc_common.h"
|
||||||
#include "core/hle/kernel/svc_results.h"
|
#include "core/hle/kernel/svc_results.h"
|
||||||
|
@ -33,7 +34,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
|
||||||
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
|
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
|
||||||
u32 new_orr_mask) {
|
u32 new_orr_mask) {
|
||||||
auto& monitor = system.Monitor();
|
auto& monitor = system.Monitor();
|
||||||
const auto current_core = system.CurrentCoreIndex();
|
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
|
||||||
|
|
||||||
// Load the value from the address.
|
// Load the value from the address.
|
||||||
const auto expected = monitor.ExclusiveRead32(current_core, address);
|
const auto expected = monitor.ExclusiveRead32(current_core, address);
|
||||||
|
@ -57,6 +58,48 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
|
||||||
|
public:
|
||||||
|
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
|
||||||
|
: KThreadQueue(kernel_) {}
|
||||||
|
|
||||||
|
virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
||||||
|
bool cancel_timer_task) override {
|
||||||
|
// Remove the thread as a waiter from its owner.
|
||||||
|
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
|
||||||
|
|
||||||
|
// Invoke the base cancel wait handler.
|
||||||
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
|
||||||
|
private:
|
||||||
|
KConditionVariable::ThreadTree* m_tree;
|
||||||
|
|
||||||
|
public:
|
||||||
|
explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
|
||||||
|
KernelCore& kernel_, KConditionVariable::ThreadTree* t)
|
||||||
|
: KThreadQueue(kernel_), m_tree(t) {}
|
||||||
|
|
||||||
|
virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
||||||
|
bool cancel_timer_task) override {
|
||||||
|
// Remove the thread as a waiter from its owner.
|
||||||
|
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
|
||||||
|
owner->RemoveWaiter(waiting_thread);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the thread is waiting on a condvar, remove it from the tree.
|
||||||
|
if (waiting_thread->IsWaitingForConditionVariable()) {
|
||||||
|
m_tree->erase(m_tree->iterator_to(*waiting_thread));
|
||||||
|
waiting_thread->ClearConditionVariable();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invoke the base cancel wait handler.
|
||||||
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
KConditionVariable::KConditionVariable(Core::System& system_)
|
KConditionVariable::KConditionVariable(Core::System& system_)
|
||||||
|
@ -78,84 +121,77 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
|
||||||
|
|
||||||
// Determine the next tag.
|
// Determine the next tag.
|
||||||
u32 next_value{};
|
u32 next_value{};
|
||||||
if (next_owner_thread) {
|
if (next_owner_thread != nullptr) {
|
||||||
next_value = next_owner_thread->GetAddressKeyValue();
|
next_value = next_owner_thread->GetAddressKeyValue();
|
||||||
if (num_waiters > 1) {
|
if (num_waiters > 1) {
|
||||||
next_value |= Svc::HandleWaitMask;
|
next_value |= Svc::HandleWaitMask;
|
||||||
}
|
}
|
||||||
|
|
||||||
next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
|
|
||||||
next_owner_thread->Wakeup();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write the value to userspace.
|
// Write the value to userspace.
|
||||||
if (!WriteToUser(system, addr, std::addressof(next_value))) {
|
ResultCode result{ResultSuccess};
|
||||||
if (next_owner_thread) {
|
if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
|
||||||
next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
|
result = ResultSuccess;
|
||||||
|
} else {
|
||||||
|
result = ResultInvalidCurrentMemory;
|
||||||
}
|
}
|
||||||
|
|
||||||
return ResultInvalidCurrentMemory;
|
// Signal the next owner thread.
|
||||||
}
|
next_owner_thread->EndWait(result);
|
||||||
}
|
return result;
|
||||||
|
} else {
|
||||||
|
// Just write the value to userspace.
|
||||||
|
R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
|
||||||
|
ResultInvalidCurrentMemory);
|
||||||
|
|
||||||
return ResultSuccess;
|
return ResultSuccess;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
|
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
|
||||||
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||||
|
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
|
||||||
|
|
||||||
// Wait for the address.
|
// Wait for the address.
|
||||||
{
|
KThread* owner_thread{};
|
||||||
KScopedAutoObject<KThread> owner_thread;
|
|
||||||
ASSERT(owner_thread.IsNull());
|
|
||||||
{
|
{
|
||||||
KScopedSchedulerLock sl(kernel);
|
KScopedSchedulerLock sl(kernel);
|
||||||
cur_thread->SetSyncedObject(nullptr, ResultSuccess);
|
|
||||||
|
|
||||||
// Check if the thread should terminate.
|
// Check if the thread should terminate.
|
||||||
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
|
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
|
||||||
|
|
||||||
{
|
|
||||||
// Read the tag from userspace.
|
// Read the tag from userspace.
|
||||||
u32 test_tag{};
|
u32 test_tag{};
|
||||||
R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
|
R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
|
||||||
ResultInvalidCurrentMemory);
|
|
||||||
|
|
||||||
// If the tag isn't the handle (with wait mask), we're done.
|
// If the tag isn't the handle (with wait mask), we're done.
|
||||||
R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);
|
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
|
||||||
|
|
||||||
// Get the lock owner thread.
|
// Get the lock owner thread.
|
||||||
owner_thread =
|
owner_thread = kernel.CurrentProcess()
|
||||||
kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
|
->GetHandleTable()
|
||||||
handle);
|
.GetObjectWithoutPseudoHandle<KThread>(handle)
|
||||||
R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
|
.ReleasePointerUnsafe();
|
||||||
|
R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
|
||||||
|
|
||||||
// Update the lock.
|
// Update the lock.
|
||||||
cur_thread->SetAddressKey(addr, value);
|
cur_thread->SetAddressKey(addr, value);
|
||||||
owner_thread->AddWaiter(cur_thread);
|
owner_thread->AddWaiter(cur_thread);
|
||||||
cur_thread->SetState(ThreadState::Waiting);
|
|
||||||
|
// Begin waiting.
|
||||||
|
cur_thread->BeginWait(std::addressof(wait_queue));
|
||||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
|
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
|
||||||
cur_thread->SetMutexWaitAddressForDebugging(addr);
|
cur_thread->SetMutexWaitAddressForDebugging(addr);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
ASSERT(owner_thread.IsNotNull());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove the thread as a waiter from the lock owner.
|
// Close our reference to the owner thread, now that the wait is over.
|
||||||
{
|
owner_thread->Close();
|
||||||
KScopedSchedulerLock sl(kernel);
|
|
||||||
KThread* owner_thread = cur_thread->GetLockOwner();
|
|
||||||
if (owner_thread != nullptr) {
|
|
||||||
owner_thread->RemoveWaiter(cur_thread);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the wait result.
|
// Get the wait result.
|
||||||
KSynchronizationObject* dummy{};
|
return cur_thread->GetWaitResult();
|
||||||
return cur_thread->GetWaitResult(std::addressof(dummy));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
KThread* KConditionVariable::SignalImpl(KThread* thread) {
|
void KConditionVariable::SignalImpl(KThread* thread) {
|
||||||
// Check pre-conditions.
|
// Check pre-conditions.
|
||||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||||
|
|
||||||
|
@ -169,18 +205,16 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
|
||||||
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||||
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||||
can_access = true;
|
can_access = true;
|
||||||
if (can_access) {
|
if (can_access) [[likely]] {
|
||||||
UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
|
UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
|
||||||
Svc::HandleWaitMask);
|
Svc::HandleWaitMask);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
KThread* thread_to_close = nullptr;
|
if (can_access) [[likely]] {
|
||||||
if (can_access) {
|
|
||||||
if (prev_tag == Svc::InvalidHandle) {
|
if (prev_tag == Svc::InvalidHandle) {
|
||||||
// If nobody held the lock previously, we're all good.
|
// If nobody held the lock previously, we're all good.
|
||||||
thread->SetSyncedObject(nullptr, ResultSuccess);
|
thread->EndWait(ResultSuccess);
|
||||||
thread->Wakeup();
|
|
||||||
} else {
|
} else {
|
||||||
// Get the previous owner.
|
// Get the previous owner.
|
||||||
KThread* owner_thread = kernel.CurrentProcess()
|
KThread* owner_thread = kernel.CurrentProcess()
|
||||||
|
@ -189,35 +223,24 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
|
||||||
static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
|
static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
|
||||||
.ReleasePointerUnsafe();
|
.ReleasePointerUnsafe();
|
||||||
|
|
||||||
if (owner_thread) {
|
if (owner_thread) [[likely]] {
|
||||||
// Add the thread as a waiter on the owner.
|
// Add the thread as a waiter on the owner.
|
||||||
owner_thread->AddWaiter(thread);
|
owner_thread->AddWaiter(thread);
|
||||||
thread_to_close = owner_thread;
|
owner_thread->Close();
|
||||||
} else {
|
} else {
|
||||||
// The lock was tagged with a thread that doesn't exist.
|
// The lock was tagged with a thread that doesn't exist.
|
||||||
thread->SetSyncedObject(nullptr, ResultInvalidState);
|
thread->EndWait(ResultInvalidState);
|
||||||
thread->Wakeup();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// If the address wasn't accessible, note so.
|
// If the address wasn't accessible, note so.
|
||||||
thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
|
thread->EndWait(ResultInvalidCurrentMemory);
|
||||||
thread->Wakeup();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return thread_to_close;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void KConditionVariable::Signal(u64 cv_key, s32 count) {
|
void KConditionVariable::Signal(u64 cv_key, s32 count) {
|
||||||
// Prepare for signaling.
|
|
||||||
constexpr int MaxThreads = 16;
|
|
||||||
|
|
||||||
KLinkedList<KThread> thread_list{kernel};
|
|
||||||
std::array<KThread*, MaxThreads> thread_array;
|
|
||||||
s32 num_to_close{};
|
|
||||||
|
|
||||||
// Perform signaling.
|
// Perform signaling.
|
||||||
s32 num_waiters{};
|
int num_waiters = 0;
|
||||||
{
|
{
|
||||||
KScopedSchedulerLock sl(kernel);
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
|
@ -226,14 +249,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
|
||||||
(it->GetConditionVariableKey() == cv_key)) {
|
(it->GetConditionVariableKey() == cv_key)) {
|
||||||
KThread* target_thread = std::addressof(*it);
|
KThread* target_thread = std::addressof(*it);
|
||||||
|
|
||||||
if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
|
this->SignalImpl(target_thread);
|
||||||
if (num_to_close < MaxThreads) {
|
|
||||||
thread_array[num_to_close++] = thread;
|
|
||||||
} else {
|
|
||||||
thread_list.push_back(*thread);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
it = thread_tree.erase(it);
|
it = thread_tree.erase(it);
|
||||||
target_thread->ClearConditionVariable();
|
target_thread->ClearConditionVariable();
|
||||||
++num_waiters;
|
++num_waiters;
|
||||||
|
@ -241,31 +257,20 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
|
||||||
|
|
||||||
// If we have no waiters, clear the has waiter flag.
|
// If we have no waiters, clear the has waiter flag.
|
||||||
if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
|
if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
|
||||||
const u32 has_waiter_flag{};
|
const u32 has_waiter_flag = 0;
|
||||||
WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
|
WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close threads in the array.
|
|
||||||
for (auto i = 0; i < num_to_close; ++i) {
|
|
||||||
thread_array[i]->Close();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close threads in the list.
|
|
||||||
for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
|
|
||||||
(*it).Close();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
|
ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
|
||||||
// Prepare to wait.
|
// Prepare to wait.
|
||||||
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
KThread* cur_thread = GetCurrentThreadPointer(kernel);
|
||||||
|
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
|
||||||
|
kernel, std::addressof(thread_tree));
|
||||||
|
|
||||||
{
|
{
|
||||||
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
|
KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
|
||||||
|
|
||||||
// Set the synced object.
|
|
||||||
cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
|
|
||||||
|
|
||||||
// Check that the thread isn't terminating.
|
// Check that the thread isn't terminating.
|
||||||
if (cur_thread->IsTerminationRequested()) {
|
if (cur_thread->IsTerminationRequested()) {
|
||||||
|
@ -290,8 +295,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wake up the next owner.
|
// Wake up the next owner.
|
||||||
next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
|
next_owner_thread->EndWait(ResultSuccess);
|
||||||
next_owner_thread->Wakeup();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write to the cv key.
|
// Write to the cv key.
|
||||||
|
@ -308,40 +312,21 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If timeout is zero, time out.
|
||||||
|
R_UNLESS(timeout != 0, ResultTimedOut);
|
||||||
|
|
||||||
// Update condition variable tracking.
|
// Update condition variable tracking.
|
||||||
{
|
|
||||||
cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
|
cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
|
||||||
thread_tree.insert(*cur_thread);
|
thread_tree.insert(*cur_thread);
|
||||||
}
|
|
||||||
|
|
||||||
// If the timeout is non-zero, set the thread as waiting.
|
// Begin waiting.
|
||||||
if (timeout != 0) {
|
cur_thread->BeginWait(std::addressof(wait_queue));
|
||||||
cur_thread->SetState(ThreadState::Waiting);
|
|
||||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
|
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
|
||||||
cur_thread->SetMutexWaitAddressForDebugging(addr);
|
cur_thread->SetMutexWaitAddressForDebugging(addr);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Cancel the timer wait.
|
// Get the wait result.
|
||||||
kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
|
return cur_thread->GetWaitResult();
|
||||||
|
|
||||||
// Remove from the condition variable.
|
|
||||||
{
|
|
||||||
KScopedSchedulerLock sl(kernel);
|
|
||||||
|
|
||||||
if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
|
|
||||||
owner->RemoveWaiter(cur_thread);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cur_thread->IsWaitingForConditionVariable()) {
|
|
||||||
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
|
||||||
cur_thread->ClearConditionVariable();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the result.
|
|
||||||
KSynchronizationObject* dummy{};
|
|
||||||
return cur_thread->GetWaitResult(std::addressof(dummy));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace Kernel
|
} // namespace Kernel
|
||||||
|
|
|
@ -34,7 +34,7 @@ public:
|
||||||
[[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
|
[[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
[[nodiscard]] KThread* SignalImpl(KThread* thread);
|
void SignalImpl(KThread* thread);
|
||||||
|
|
||||||
ThreadTree thread_tree;
|
ThreadTree thread_tree;
|
||||||
|
|
||||||
|
|
|
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
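The line added above (and the matching additions in the following hunks) establishes a consistent order: disable dispatch first, then take the table's spin lock, so the spin-lock holder cannot be preempted mid-update. A generic sketch of the spin-lock half of that pairing, with illustrative names:

```cpp
#include <atomic>

class SpinLockSketch {
public:
    void lock() {
        // Busy-wait until the flag is clear; a real implementation may pause or yield.
        while (flag.test_and_set(std::memory_order_acquire)) {
        }
    }
    void unlock() {
        flag.clear(std::memory_order_release);
    }

private:
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
};
```

Acquiring such a lock (for example via `std::scoped_lock lk{table_lock};`) only after a dispatch-disable guard, as sketched earlier, keeps the critical section free of context switches on the current core.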
|
||||||
|
@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
|
||||||
// Find the object and free the entry.
|
// Find the object and free the entry.
|
||||||
KAutoObject* obj = nullptr;
|
KAutoObject* obj = nullptr;
|
||||||
{
|
{
|
||||||
|
KScopedDisableDispatch dd(kernel);
|
||||||
KScopedSpinLock lk(m_lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
if (this->IsValidHandle(handle)) {
|
if (this->IsValidHandle(handle)) {
|
||||||
|
@ -62,6 +64,7 @@ bool KHandleTable::Remove(Handle handle) {
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
|
ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
|
||||||
|
KScopedDisableDispatch dd(kernel);
|
||||||
KScopedSpinLock lk(m_lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
// Never exceed our capacity.
|
// Never exceed our capacity.
|
||||||
|
@ -84,6 +87,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KHandleTable::Reserve(Handle* out_handle) {
|
ResultCode KHandleTable::Reserve(Handle* out_handle) {
|
||||||
|
KScopedDisableDispatch dd(kernel);
|
||||||
KScopedSpinLock lk(m_lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
// Never exceed our capacity.
|
// Never exceed our capacity.
|
||||||
|
@ -94,6 +98,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void KHandleTable::Unreserve(Handle handle) {
|
void KHandleTable::Unreserve(Handle handle) {
|
||||||
|
KScopedDisableDispatch dd(kernel);
|
||||||
KScopedSpinLock lk(m_lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
// Unpack the handle.
|
// Unpack the handle.
|
||||||
|
@ -112,6 +117,7 @@ void KHandleTable::Unreserve(Handle handle) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
|
void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
|
||||||
|
KScopedDisableDispatch dd(kernel);
|
||||||
KScopedSpinLock lk(m_lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
// Unpack the handle.
|
// Unpack the handle.
|
||||||
|
|
|
@ -68,6 +68,7 @@ public:
|
||||||
template <typename T = KAutoObject>
|
template <typename T = KAutoObject>
|
||||||
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
|
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
|
||||||
// Lock and look up in table.
|
// Lock and look up in table.
|
||||||
|
KScopedDisableDispatch dd(kernel);
|
||||||
KScopedSpinLock lk(m_lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
|
|
||||||
if constexpr (std::is_same_v<T, KAutoObject>) {
|
if constexpr (std::is_same_v<T, KAutoObject>) {
|
||||||
|
@ -122,6 +123,7 @@ public:
|
||||||
size_t num_opened;
|
size_t num_opened;
|
||||||
{
|
{
|
||||||
// Lock the table.
|
// Lock the table.
|
||||||
|
KScopedDisableDispatch dd(kernel);
|
||||||
KScopedSpinLock lk(m_lock);
|
KScopedSpinLock lk(m_lock);
|
||||||
for (num_opened = 0; num_opened < num_handles; num_opened++) {
|
for (num_opened = 0; num_opened < num_handles; num_opened++) {
|
||||||
// Get the current handle.
|
// Get the current handle.
|
||||||
|
|
80 added lines: src/core/hle/kernel/k_light_condition_variable.cpp (new executable file)
|
@ -0,0 +1,80 @@
|
||||||
|
// Copyright 2021 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include "core/hle/kernel/k_light_condition_variable.h"
|
||||||
|
#include "core/hle/kernel/k_scheduler.h"
|
||||||
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||||
|
#include "core/hle/kernel/k_thread_queue.h"
|
||||||
|
#include "core/hle/kernel/svc_results.h"
|
||||||
|
|
||||||
|
namespace Kernel {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
|
||||||
|
public:
|
||||||
|
ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
|
||||||
|
bool term)
|
||||||
|
: KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
|
||||||
|
|
||||||
|
virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
||||||
|
bool cancel_timer_task) override {
|
||||||
|
// Only process waits if we're allowed to.
|
||||||
|
if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the waiting thread from the light condition variable's wait list.
|
||||||
|
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
|
||||||
|
|
||||||
|
// Invoke the base cancel wait handler.
|
||||||
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
KThread::WaiterList* m_wait_list;
|
||||||
|
bool m_allow_terminating_thread;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
|
||||||
|
// Create thread queue.
|
||||||
|
KThread* owner = GetCurrentThreadPointer(kernel);
|
||||||
|
|
||||||
|
ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
|
||||||
|
allow_terminating_thread);
|
||||||
|
|
||||||
|
// Sleep the thread.
|
||||||
|
{
|
||||||
|
KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
|
||||||
|
|
||||||
|
if (!allow_terminating_thread && owner->IsTerminationRequested()) {
|
||||||
|
lk.CancelSleep();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
lock->Unlock();
|
||||||
|
|
||||||
|
// Add the thread to the queue.
|
||||||
|
wait_list.push_back(*owner);
|
||||||
|
|
||||||
|
// Begin waiting.
|
||||||
|
owner->BeginWait(std::addressof(wait_queue));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re-acquire the lock.
|
||||||
|
lock->Lock();
|
||||||
|
}
|
||||||
|
|
||||||
|
void KLightConditionVariable::Broadcast() {
|
||||||
|
KScopedSchedulerLock lk(kernel);
|
||||||
|
|
||||||
|
// Signal all threads.
|
||||||
|
for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
|
||||||
|
it->EndWait(ResultSuccess);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace Kernel
|
|
@@ -1,73 +1,25 @@
-// Copyright 2020 yuzu Emulator Project
+// Copyright 2021 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once
 
 #include "common/common_types.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/time_manager.h"
+#include "core/hle/kernel/k_thread.h"
 
 namespace Kernel {
 
 class KernelCore;
+class KLightLock;
 
 class KLightConditionVariable {
 public:
     explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}
 
-    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) {
-        WaitImpl(lock, timeout, allow_terminating_thread);
-    }
-
-    void Broadcast() {
-        KScopedSchedulerLock lk{kernel};
-
-        // Signal all threads.
-        for (auto& thread : wait_list) {
-            thread.SetState(ThreadState::Runnable);
-        }
-    }
+    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
+    void Broadcast();
 
 private:
-    void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
-        KThread* owner = GetCurrentThreadPointer(kernel);
-
-        // Sleep the thread.
-        {
-            KScopedSchedulerLockAndSleep lk{kernel, owner, timeout};
-
-            if (!allow_terminating_thread && owner->IsTerminationRequested()) {
-                lk.CancelSleep();
-                return;
-            }
-
-            lock->Unlock();
-
-            // Set the thread as waiting.
-            GetCurrentThread(kernel).SetState(ThreadState::Waiting);
-
-            // Add the thread to the queue.
-            wait_list.push_back(GetCurrentThread(kernel));
-        }
-
-        // Remove the thread from the wait list.
-        {
-            KScopedSchedulerLock sl{kernel};
-
-            wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel)));
-        }
-
-        // Cancel the task that the sleep setup.
-        kernel.TimeManager().UnscheduleTimeEvent(owner);
-
-        // Re-acquire the lock.
-        lock->Lock();
-    }
-
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };
@@ -5,44 +5,54 @@
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 
 namespace Kernel {
 
+namespace {
+
+class ThreadQueueImplForKLightLock final : public KThreadQueue {
+public:
+    explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+
+    virtual void CancelWait([[maybe_unused]] KThread* waiting_thread,
+                            [[maybe_unused]] ResultCode wait_result,
+                            [[maybe_unused]] bool cancel_timer_task) override {
+        // Do nothing, waiting to acquire a light lock cannot be canceled.
+    }
+};
+
+} // namespace
+
 void KLightLock::Lock() {
     const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
-    const uintptr_t cur_thread_tag = (cur_thread | 1);
 
     while (true) {
         uintptr_t old_tag = tag.load(std::memory_order_relaxed);
 
-        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
+        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
                                           std::memory_order_acquire)) {
-            if ((old_tag | 1) == cur_thread_tag) {
-                return;
-            }
         }
 
-        if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
+        if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
             break;
         }
-
-        LockSlowPath(old_tag | 1, cur_thread);
     }
 }
 
 void KLightLock::Unlock() {
     const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
 
     uintptr_t expected = cur_thread;
-    do {
-        if (expected != cur_thread) {
-            return UnlockSlowPath(cur_thread);
-        }
-    } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
+    if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
+        this->UnlockSlowPath(cur_thread);
+    }
 }
 
-void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
+bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
     KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
+    ThreadQueueImplForKLightLock wait_queue(kernel);
 
     // Pend the current thread waiting on the owner thread.
     {
@@ -50,30 +60,23 @@ void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
 
         // Ensure we actually have locking to do.
         if (tag.load(std::memory_order_relaxed) != _owner) {
-            return;
+            return false;
         }
 
         // Add the current thread as a waiter on the owner.
-        KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
+        KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ul);
         cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
         owner_thread->AddWaiter(cur_thread);
 
-        // Set thread states.
-        cur_thread->SetState(ThreadState::Waiting);
+        // Begin waiting to hold the lock.
+        cur_thread->BeginWait(std::addressof(wait_queue));
 
         if (owner_thread->IsSuspended()) {
             owner_thread->ContinueIfHasKernelWaiters();
         }
     }
 
-    // We're no longer waiting on the lock owner.
-    {
-        KScopedSchedulerLock sl{kernel};
-
-        if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) {
-            owner_thread->RemoveWaiter(cur_thread);
-        }
-    }
+    return true;
 }
 
 void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
@@ -81,22 +84,20 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
 
     // Unlock.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl(kernel);
 
         // Get the next owner.
-        s32 num_waiters = 0;
+        s32 num_waiters;
         KThread* next_owner = owner_thread->RemoveWaiterByKey(
             std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
 
         // Pass the lock to the next owner.
         uintptr_t next_tag = 0;
         if (next_owner != nullptr) {
-            next_tag = reinterpret_cast<uintptr_t>(next_owner);
-            if (num_waiters > 1) {
-                next_tag |= 0x1;
-            }
+            next_tag =
                reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
 
-            next_owner->SetState(ThreadState::Runnable);
+            next_owner->EndWait(ResultSuccess);
 
             if (next_owner->IsSuspended()) {
                 next_owner->ContinueIfHasKernelWaiters();
@@ -110,7 +111,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
         }
 
         // Write the new tag value.
-        tag.store(next_tag);
+        tag.store(next_tag, std::memory_order_release);
     }
 }
@@ -20,7 +20,7 @@ public:
     void Unlock();
 
-    void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
+    bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
 
     void UnlockSlowPath(uintptr_t cur_thread);
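The lock word (tag) manipulated above packs the owning KThread pointer and a "has waiters" flag into one value: zero means free, an even value means held with no waiters, and bit 0 set marks contention so Unlock() must take the slow path and hand the lock off. A standalone sketch of that encoding, using plain C++ rather than the emulator's types (illustrative only):

    #include <atomic>
    #include <cstdint>

    // Standalone illustration of the tag encoding used by KLightLock above.
    // Thread objects are at least 2-byte aligned, so bit 0 is free to use as a flag.
    std::atomic<std::uintptr_t> tag{0};

    bool TryLockFastPath(std::uintptr_t cur_thread) {
        std::uintptr_t expected = 0;
        // 0 -> cur_thread: we took a free lock with no waiters.
        return tag.compare_exchange_strong(expected, cur_thread, std::memory_order_acquire);
    }

    bool UnlockFastPath(std::uintptr_t cur_thread) {
        std::uintptr_t expected = cur_thread;
        // cur_thread -> 0 succeeds only if bit 0 is clear, i.e. nobody is waiting;
        // otherwise the caller must hand the lock off on the slow path.
        return tag.compare_exchange_strong(expected, 0, std::memory_order_release);
    }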
@@ -60,6 +60,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
@@ -227,12 +228,15 @@ void KProcess::PinCurrentThread() {
     const s32 core_id = GetCurrentCoreId(kernel);
     KThread* cur_thread = GetCurrentThreadPointer(kernel);
 
+    // If the thread isn't terminated, pin it.
+    if (!cur_thread->IsTerminationRequested()) {
        // Pin it.
        PinThread(core_id, cur_thread);
        cur_thread->Pin();
 
        // An update is needed.
        KScheduler::SetSchedulerUpdateNeeded(kernel);
+    }
 }
 
 void KProcess::UnpinCurrentThread() {
@@ -250,6 +254,20 @@ void KProcess::UnpinCurrentThread() {
     KScheduler::SetSchedulerUpdateNeeded(kernel);
 }
 
+void KProcess::UnpinThread(KThread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Get the thread's core id.
+    const auto core_id = thread->GetActiveCore();
+
+    // Unpin it.
+    UnpinThread(core_id, thread);
+    thread->Unpin();
+
+    // An update is needed.
+    KScheduler::SetSchedulerUpdateNeeded(kernel);
+}
+
 ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
                                      [[maybe_unused]] size_t size) {
     // Lock ourselves, to prevent concurrent access.
@@ -259,7 +259,7 @@ public:
 
     [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
         ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
-        return pinned_threads[core_id];
+        return pinned_threads.at(core_id);
     }
 
     /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
@@ -347,6 +347,7 @@ public:
 
     void PinCurrentThread();
     void UnpinCurrentThread();
+    void UnpinThread(KThread* thread);
 
     KLightLock& GetStateLock() {
         return state_lock;
@@ -368,14 +369,14 @@ private:
     void PinThread(s32 core_id, KThread* thread) {
         ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
         ASSERT(thread != nullptr);
-        ASSERT(pinned_threads[core_id] == nullptr);
+        ASSERT(pinned_threads.at(core_id) == nullptr);
         pinned_threads[core_id] = thread;
     }
 
     void UnpinThread(s32 core_id, KThread* thread) {
         ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
         ASSERT(thread != nullptr);
-        ASSERT(pinned_threads[core_id] == thread);
+        ASSERT(pinned_threads.at(core_id) == thread);
         pinned_threads[core_id] = nullptr;
     }
@@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3
 
     // If the thread is runnable, we want to change its priority in the queue.
     if (thread->GetRawState() == ThreadState::Runnable) {
-        GetPriorityQueue(kernel).ChangePriority(
-            old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
+        GetPriorityQueue(kernel).ChangePriority(old_priority,
+                                                thread == kernel.GetCurrentEmuThread(), thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
     }
@@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
 }
 
 bool KScheduler::CanSchedule(KernelCore& kernel) {
-    return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
+    return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
 }
 
 bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
@@ -376,20 +376,28 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
+    // If we are shutting down the kernel, none of this is relevant anymore.
+    if (kernel.IsShuttingDown()) {
+        return;
     }
+
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
+    // If we are shutting down the kernel, none of this is relevant anymore.
+    if (kernel.IsShuttingDown()) {
+        return;
     }
-    RescheduleCores(kernel, cores_needing_scheduling);
+
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
+
+    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
+        GetCurrentThreadPointer(kernel)->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
+    }
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -617,13 +625,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
         state.highest_priority_thread = nullptr;
     }
 }
 
-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -642,10 +654,12 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
 
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -655,26 +669,33 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread) {
-        if (thread->IsCallingSvc()) {
-            thread->ClearIsCallingSvc();
-        }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;
+    if (thread->IsCallingSvc()) {
+        thread->ClearIsCallingSvc();
+    }
 
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.SaveContext(thread->GetContext32());
-            cpu_core.SaveContext(thread->GetContext64());
-            // Save the TPIDR_EL0 system register in case it was modified.
-            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        } else {
-            prev_thread = nullptr;
-        }
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
 
-        thread->context_guard.Unlock();
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
+    } else {
+        prev_thread = nullptr;
     }
+
+    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -683,11 +704,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -705,7 +721,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -717,10 +733,15 @@ void KScheduler::ScheduleImpl() {
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +752,7 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
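The scheduler changes above lean on a per-thread disable-dispatch count: DisableDispatch() nests, EnableDispatch() unnests, and CanSchedule() only permits a reschedule once the count has fallen back to its base value. A simplified, standalone counter sketch of that idea (illustrative only; DispatchCounter and its methods are made-up names, not yuzu's):

    // Illustrative only: a simplified nesting counter in the spirit of the
    // GetDisableDispatchCount()/DisableDispatch()/EnableDispatch() checks above.
    class DispatchCounter {
    public:
        void Disable() {
            ++disable_count;
        }

        // Returns true when this Enable() balanced the outermost Disable(),
        // i.e. the caller may now reschedule.
        bool Enable() {
            --disable_count;
            return disable_count == 0;
        }

        bool CanSchedule() const {
            return disable_count <= 1;
        }

    private:
        int disable_count = 0;
    };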
@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
@@ -23,6 +23,11 @@ public:
     }
 
     void Lock() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (kernel.IsShuttingDown()) {
+            return;
+        }
+
         if (IsLockedByCurrentThread()) {
             // If we already own the lock, we can just increment the count.
             ASSERT(lock_count > 0);
@@ -43,6 +48,11 @@ public:
     }
 
     void Unlock() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (kernel.IsShuttingDown()) {
+            return;
+        }
+
         ASSERT(IsLockedByCurrentThread());
         ASSERT(lock_count > 0);
@@ -8,6 +8,7 @@
 #pragma once
 
 #include "common/common_types.h"
+#include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/time_manager.h"
@@ -175,8 +175,7 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
     {
         KScopedSchedulerLock lock(kernel);
         if (!context.IsThreadWaiting()) {
-            context.GetThread().Wakeup();
-            context.GetThread().SetSyncedObject(nullptr, result);
+            context.GetThread().EndWait(result);
         }
     }
@@ -8,11 +8,66 @@
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 
 namespace Kernel {
 
+namespace {
+
+class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
+public:
+    ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
+                                                 KSynchronizationObject::ThreadListNode* n, s32 c)
+        : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
+
+    virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
+                                 ResultCode wait_result) override {
+        // Determine the sync index, and unlink all nodes.
+        s32 sync_index = -1;
+        for (auto i = 0; i < m_count; ++i) {
+            // Check if this is the signaled object.
+            if (m_objects[i] == signaled_object && sync_index == -1) {
+                sync_index = i;
+            }
+
+            // Unlink the current node from the current object.
+            m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
+        }
+
+        // Set the waiting thread's sync index.
+        waiting_thread->SetSyncedIndex(sync_index);
+
+        // Set the waiting thread as not cancellable.
+        waiting_thread->ClearCancellable();
+
+        // Invoke the base end wait handler.
+        KThreadQueue::EndWait(waiting_thread, wait_result);
+    }
+
+    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                            bool cancel_timer_task) override {
+        // Remove all nodes from our list.
+        for (auto i = 0; i < m_count; ++i) {
+            m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
+        }
+
+        // Set the waiting thread as not cancellable.
+        waiting_thread->ClearCancellable();
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+
+private:
+    KSynchronizationObject** m_objects;
+    KSynchronizationObject::ThreadListNode* m_nodes;
+    s32 m_count;
+};
+
+} // namespace
+
 void KSynchronizationObject::Finalize() {
     this->OnFinalizeSynchronizationObject();
     KAutoObject::Finalize();
@@ -25,11 +80,19 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
     std::vector<ThreadListNode> thread_nodes(num_objects);
 
     // Prepare for wait.
-    KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread();
+    KThread* thread = GetCurrentThreadPointer(kernel_ctx);
+    ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
+                                                            thread_nodes.data(), num_objects);
 
     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout};
+        KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);
+
+        // Check if the thread should terminate.
+        if (thread->IsTerminationRequested()) {
+            slp.CancelSleep();
+            return ResultTerminationRequested;
+        }
 
         // Check if any of the objects are already signaled.
         for (auto i = 0; i < num_objects; ++i) {
@@ -48,12 +111,6 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
             return ResultTimedOut;
         }
 
-        // Check if the thread should terminate.
-        if (thread->IsTerminationRequested()) {
-            slp.CancelSleep();
-            return ResultTerminationRequested;
-        }
-
         // Check if waiting was canceled.
         if (thread->IsWaitCancelled()) {
             slp.CancelSleep();
@@ -66,73 +123,25 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
             thread_nodes[i].thread = thread;
             thread_nodes[i].next = nullptr;
 
-            if (objects[i]->thread_list_tail == nullptr) {
-                objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
-            } else {
-                objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
-            }
-
-            objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
+            objects[i]->LinkNode(std::addressof(thread_nodes[i]));
         }
 
-        // For debugging only
-        thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
-
-        // Mark the thread as waiting.
+        // Mark the thread as cancellable.
         thread->SetCancellable();
-        thread->SetSyncedObject(nullptr, ResultTimedOut);
-        thread->SetState(ThreadState::Waiting);
+
+        // Clear the thread's synced index.
+        thread->SetSyncedIndex(-1);
+
+        // Wait for an object to be signaled.
+        thread->BeginWait(std::addressof(wait_queue));
         thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
     }
 
-    // The lock/sleep is done, so we should be able to get our result.
-
-    // Thread is no longer cancellable.
-    thread->ClearCancellable();
-
-    // For debugging only
-    thread->SetWaitObjectsForDebugging({});
-
-    // Cancel the timer as needed.
-    kernel_ctx.TimeManager().UnscheduleTimeEvent(thread);
+    // Set the output index.
+    *out_index = thread->GetSyncedIndex();
 
     // Get the wait result.
-    ResultCode wait_result{ResultSuccess};
-    s32 sync_index = -1;
-    {
-        KScopedSchedulerLock lock(kernel_ctx);
-        KSynchronizationObject* synced_obj;
-        wait_result = thread->GetWaitResult(std::addressof(synced_obj));
-
-        for (auto i = 0; i < num_objects; ++i) {
-            // Unlink the object from the list.
-            ThreadListNode* prev_ptr =
-                reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
-            ThreadListNode* prev_val = nullptr;
-            ThreadListNode *prev, *tail_prev;
-
-            do {
-                prev = prev_ptr;
-                prev_ptr = prev_ptr->next;
-                tail_prev = prev_val;
-                prev_val = prev_ptr;
-            } while (prev_ptr != std::addressof(thread_nodes[i]));
-
-            if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
-                objects[i]->thread_list_tail = tail_prev;
-            }
-
-            prev->next = thread_nodes[i].next;
-
-            if (objects[i] == synced_obj) {
-                sync_index = i;
-            }
-        }
-    }
-
-    // Set output.
-    *out_index = sync_index;
-    return wait_result;
+    return thread->GetWaitResult();
 }
 
 KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
@@ -141,7 +150,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
 KSynchronizationObject::~KSynchronizationObject() = default;
 
 void KSynchronizationObject::NotifyAvailable(ResultCode result) {
-    KScopedSchedulerLock lock(kernel);
+    KScopedSchedulerLock sl(kernel);
 
     // If we're not signaled, we've nothing to notify.
     if (!this->IsSignaled()) {
@@ -150,11 +159,7 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
 
     // Iterate over each thread.
     for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
-        KThread* thread = cur_node->thread;
-        if (thread->GetState() == ThreadState::Waiting) {
-            thread->SetSyncedObject(this, result);
-            thread->SetState(ThreadState::Runnable);
-        }
+        cur_node->thread->NotifyAvailable(this, result);
     }
 }
@@ -35,6 +35,38 @@ public:
 
     [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
 
+    void LinkNode(ThreadListNode* node_) {
+        // Link the node to the list.
+        if (thread_list_tail == nullptr) {
+            thread_list_head = node_;
+        } else {
+            thread_list_tail->next = node_;
+        }
+
+        thread_list_tail = node_;
+    }
+
+    void UnlinkNode(ThreadListNode* node_) {
+        // Unlink the node from the list.
+        ThreadListNode* prev_ptr =
+            reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
+        ThreadListNode* prev_val = nullptr;
+        ThreadListNode *prev, *tail_prev;
+
+        do {
+            prev = prev_ptr;
+            prev_ptr = prev_ptr->next;
+            tail_prev = prev_val;
+            prev_val = prev_ptr;
+        } while (prev_ptr != node_);
+
+        if (thread_list_tail == node_) {
+            thread_list_tail = tail_prev;
+        }
+
+        prev->next = node_->next;
+    }
+
 protected:
     explicit KSynchronizationObject(KernelCore& kernel);
     ~KSynchronizationObject() override;
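LinkNode/UnlinkNode above maintain an intrusive singly linked list of ThreadListNode records through the object's head and tail pointers; UnlinkNode walks from the head to find the node's predecessor before splicing it out. A self-contained sketch of the same bookkeeping with simplified stand-in types (illustrative only, not the kernel's implementation):

    #include <cassert>

    // Simplified stand-ins for KSynchronizationObject's intrusive wait list.
    struct Node {
        Node* next = nullptr;
    };

    struct WaitList {
        Node* head = nullptr;
        Node* tail = nullptr;

        void Link(Node* node) {
            // Append at the tail, like LinkNode above.
            if (tail == nullptr) {
                head = node;
            } else {
                tail->next = node;
            }
            tail = node;
        }

        void Unlink(Node* node) {
            // Walk from the head to find the predecessor, then splice the node out.
            Node* prev = nullptr;
            Node* cur = head;
            while (cur != node) {
                assert(cur != nullptr);
                prev = cur;
                cur = cur->next;
            }
            if (prev == nullptr) {
                head = node->next;
            } else {
                prev->next = node->next;
            }
            if (tail == node) {
                tail = prev;
            }
        }
    };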
@ -13,6 +13,9 @@
|
||||||
#include "common/common_types.h"
|
#include "common/common_types.h"
|
||||||
#include "common/fiber.h"
|
#include "common/fiber.h"
|
||||||
#include "common/logging/log.h"
|
#include "common/logging/log.h"
|
||||||
|
#include "common/scope_exit.h"
|
||||||
|
#include "common/settings.h"
|
||||||
|
#include "common/thread_queue_list.h"
|
||||||
#include "core/core.h"
|
#include "core/core.h"
|
||||||
#include "core/cpu_manager.h"
|
#include "core/cpu_manager.h"
|
||||||
#include "core/hardware_properties.h"
|
#include "core/hardware_properties.h"
|
||||||
|
@ -56,6 +59,34 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
|
||||||
|
|
||||||
namespace Kernel {
|
namespace Kernel {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
|
||||||
|
public:
|
||||||
|
explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
|
||||||
|
: KThreadQueueWithoutEndWait(kernel_) {}
|
||||||
|
};
|
||||||
|
|
||||||
|
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
|
||||||
|
public:
|
||||||
|
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
|
||||||
|
: KThreadQueue(kernel_), m_wait_list(wl) {}
|
||||||
|
|
||||||
|
virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
||||||
|
bool cancel_timer_task) override {
|
||||||
|
// Remove the thread from the wait list.
|
||||||
|
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
|
||||||
|
|
||||||
|
// Invoke the base cancel wait handler.
|
||||||
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
KThread::WaiterList* m_wait_list;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
KThread::KThread(KernelCore& kernel_)
|
KThread::KThread(KernelCore& kernel_)
|
||||||
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
|
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
|
||||||
KThread::~KThread() = default;
|
KThread::~KThread() = default;
|
||||||
|
@ -82,6 +113,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
|
||||||
[[fallthrough]];
|
[[fallthrough]];
|
||||||
case ThreadType::HighPriority:
|
case ThreadType::HighPriority:
|
||||||
[[fallthrough]];
|
[[fallthrough]];
|
||||||
|
case ThreadType::Dummy:
|
||||||
|
[[fallthrough]];
|
||||||
case ThreadType::User:
|
case ThreadType::User:
|
||||||
ASSERT(((owner == nullptr) ||
|
ASSERT(((owner == nullptr) ||
|
||||||
(owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
|
(owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
|
||||||
|
@ -127,11 +160,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
|
||||||
priority = prio;
|
priority = prio;
|
||||||
base_priority = prio;
|
base_priority = prio;
|
||||||
|
|
||||||
// Set sync object and waiting lock to null.
|
|
||||||
synced_object = nullptr;
|
|
||||||
|
|
||||||
// Initialize sleeping queue.
|
// Initialize sleeping queue.
|
||||||
sleeping_queue = nullptr;
|
wait_queue = nullptr;
|
||||||
|
|
||||||
// Set suspend flags.
|
// Set suspend flags.
|
||||||
suspend_request_flags = 0;
|
suspend_request_flags = 0;
|
||||||
|
@ -184,7 +214,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
|
||||||
// Setup the stack parameters.
|
// Setup the stack parameters.
|
||||||
StackParameters& sp = GetStackParameters();
|
StackParameters& sp = GetStackParameters();
|
||||||
sp.cur_thread = this;
|
sp.cur_thread = this;
|
||||||
sp.disable_count = 1;
|
sp.disable_count = 0;
|
||||||
SetInExceptionHandler();
|
SetInExceptionHandler();
|
||||||
|
|
||||||
// Set thread ID.
|
// Set thread ID.
|
||||||
|
@ -211,15 +241,16 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
|
||||||
// Initialize the thread.
|
// Initialize the thread.
|
||||||
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
|
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
|
||||||
|
|
||||||
// Initialize host context.
|
// Initialize emulation parameters.
|
||||||
thread->host_context =
|
thread->host_context =
|
||||||
std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
|
std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
|
||||||
|
thread->is_single_core = !Settings::values.use_multi_core.GetValue();
|
||||||
|
|
||||||
return ResultSuccess;
|
return ResultSuccess;
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KThread::InitializeDummyThread(KThread* thread) {
|
ResultCode KThread::InitializeDummyThread(KThread* thread) {
|
||||||
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
|
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
|
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
|
||||||
|
@ -273,11 +304,14 @@ void KThread::Finalize() {
|
||||||
|
|
||||||
auto it = waiter_list.begin();
|
auto it = waiter_list.begin();
|
||||||
while (it != waiter_list.end()) {
|
while (it != waiter_list.end()) {
|
||||||
// The thread shouldn't be a kernel waiter.
|
// Clear the lock owner
|
||||||
it->SetLockOwner(nullptr);
|
it->SetLockOwner(nullptr);
|
||||||
it->SetSyncedObject(nullptr, ResultInvalidState);
|
|
||||||
it->Wakeup();
|
// Erase the waiter from our list.
|
||||||
it = waiter_list.erase(it);
|
it = waiter_list.erase(it);
|
||||||
|
|
||||||
|
// Cancel the thread's wait.
|
||||||
|
it->CancelWait(ResultInvalidState, true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -294,15 +328,12 @@ bool KThread::IsSignaled() const {
|
||||||
return signaled;
|
return signaled;
|
||||||
}
|
}
|
||||||
|
|
||||||
void KThread::Wakeup() {
|
void KThread::OnTimer() {
|
||||||
KScopedSchedulerLock sl{kernel};
|
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||||
|
|
||||||
|
// If we're waiting, cancel the wait.
|
||||||
if (GetState() == ThreadState::Waiting) {
|
if (GetState() == ThreadState::Waiting) {
|
||||||
if (sleeping_queue != nullptr) {
|
wait_queue->CancelWait(this, ResultTimedOut, false);
|
||||||
sleeping_queue->WakeupThread(this);
|
|
||||||
} else {
|
|
||||||
SetState(ThreadState::Runnable);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -327,7 +358,7 @@ void KThread::StartTermination() {
|
||||||
|
|
||||||
// Signal.
|
// Signal.
|
||||||
signaled = true;
|
signaled = true;
|
||||||
NotifyAvailable();
|
KSynchronizationObject::NotifyAvailable();
|
||||||
|
|
||||||
// Clear previous thread in KScheduler.
|
// Clear previous thread in KScheduler.
|
||||||
KScheduler::ClearPreviousThread(kernel, this);
|
KScheduler::ClearPreviousThread(kernel, this);
|
||||||
|
@ -475,30 +506,32 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
|
||||||
return ResultSuccess;
|
return ResultSuccess;
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
|
ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
|
||||||
ASSERT(parent != nullptr);
|
ASSERT(parent != nullptr);
|
||||||
ASSERT(v_affinity_mask != 0);
|
ASSERT(v_affinity_mask != 0);
|
||||||
KScopedLightLock lk{activity_pause_lock};
|
KScopedLightLock lk(activity_pause_lock);
|
||||||
|
|
||||||
// Set the core mask.
|
// Set the core mask.
|
||||||
u64 p_affinity_mask = 0;
|
u64 p_affinity_mask = 0;
|
||||||
{
|
{
|
||||||
KScopedSchedulerLock sl{kernel};
|
KScopedSchedulerLock sl(kernel);
|
||||||
ASSERT(num_core_migration_disables >= 0);
|
ASSERT(num_core_migration_disables >= 0);
|
||||||
|
|
||||||
// If the core id is no-update magic, preserve the ideal core id.
|
// If we're updating, set our ideal virtual core.
|
||||||
if (cpu_core_id == Svc::IdealCoreNoUpdate) {
|
if (core_id_ != Svc::IdealCoreNoUpdate) {
|
||||||
cpu_core_id = virtual_ideal_core_id;
|
virtual_ideal_core_id = core_id_;
|
||||||
R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
|
} else {
|
||||||
|
// Preserve our ideal core id.
|
||||||
|
core_id_ = virtual_ideal_core_id;
|
||||||
|
R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the virtual core/affinity mask.
|
// Set our affinity mask.
|
||||||
virtual_ideal_core_id = cpu_core_id;
|
|
||||||
virtual_affinity_mask = v_affinity_mask;
|
virtual_affinity_mask = v_affinity_mask;
|
||||||
|
|
||||||
// Translate the virtual core to a physical core.
|
// Translate the virtual core to a physical core.
|
||||||
if (cpu_core_id >= 0) {
|
if (core_id_ >= 0) {
|
||||||
cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id];
|
core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
|
||||||
}
|
}
|
||||||
|
|
||||||
// Translate the virtual affinity mask to a physical one.
|
// Translate the virtual affinity mask to a physical one.
|
||||||
|
@ -513,7 +546,7 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
|
||||||
const KAffinityMask old_mask = physical_affinity_mask;
|
const KAffinityMask old_mask = physical_affinity_mask;
|
||||||
|
|
||||||
// Set our new ideals.
|
// Set our new ideals.
|
||||||
physical_ideal_core_id = cpu_core_id;
|
physical_ideal_core_id = core_id_;
|
||||||
physical_affinity_mask.SetAffinityMask(p_affinity_mask);
|
physical_affinity_mask.SetAffinityMask(p_affinity_mask);
|
||||||
|
|
||||||
if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
|
if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
|
||||||
|
@ -531,18 +564,18 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Otherwise, we edit the original affinity for restoration later.
|
// Otherwise, we edit the original affinity for restoration later.
|
||||||
original_physical_ideal_core_id = cpu_core_id;
|
original_physical_ideal_core_id = core_id_;
|
||||||
original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
|
original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the pinned waiter list.
|
// Update the pinned waiter list.
|
||||||
|
ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
|
||||||
{
|
{
|
||||||
bool retry_update{};
|
bool retry_update{};
|
||||||
bool thread_is_pinned{};
|
|
||||||
do {
|
do {
|
||||||
// Lock the scheduler.
|
// Lock the scheduler.
|
||||||
KScopedSchedulerLock sl{kernel};
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
// Don't do any further management if our termination has been requested.
|
// Don't do any further management if our termination has been requested.
|
||||||
R_SUCCEED_IF(IsTerminationRequested());
|
R_SUCCEED_IF(IsTerminationRequested());
|
||||||
|
@ -570,12 +603,9 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
|
||||||
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
|
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
|
||||||
ResultTerminationRequested);
|
ResultTerminationRequested);
|
||||||
|
|
||||||
// Note that the thread was pinned.
|
|
||||||
thread_is_pinned = true;
|
|
||||||
|
|
||||||
// Wait until the thread isn't pinned any more.
|
// Wait until the thread isn't pinned any more.
|
||||||
pinned_waiter_list.push_back(GetCurrentThread(kernel));
|
pinned_waiter_list.push_back(GetCurrentThread(kernel));
|
||||||
GetCurrentThread(kernel).SetState(ThreadState::Waiting);
|
GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
|
||||||
} else {
|
} else {
|
||||||
// If the thread isn't pinned, release the scheduler lock and retry until it's
|
// If the thread isn't pinned, release the scheduler lock and retry until it's
|
||||||
// not current.
|
// not current.
|
||||||
|
@ -583,16 +613,6 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} while (retry_update);
|
} while (retry_update);
|
||||||
|
|
||||||
// If the thread was pinned, it no longer is, and we should remove the current thread from
|
|
||||||
// our waiter list.
|
|
||||||
if (thread_is_pinned) {
|
|
||||||
// Lock the scheduler.
|
|
||||||
KScopedSchedulerLock sl{kernel};
|
|
||||||
|
|
||||||
// Remove from the list.
|
|
||||||
pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ResultSuccess;
|
return ResultSuccess;
|
||||||
|
@ -641,15 +661,9 @@ void KThread::WaitCancel() {
|
||||||
KScopedSchedulerLock sl{kernel};
|
KScopedSchedulerLock sl{kernel};
|
||||||
|
|
||||||
// Check if we're waiting and cancellable.
|
// Check if we're waiting and cancellable.
|
||||||
if (GetState() == ThreadState::Waiting && cancellable) {
|
if (this->GetState() == ThreadState::Waiting && cancellable) {
|
||||||
if (sleeping_queue != nullptr) {
|
|
||||||
sleeping_queue->WakeupThread(this);
|
|
||||||
wait_cancelled = true;
|
|
||||||
} else {
|
|
||||||
SetSyncedObject(nullptr, ResultCancelled);
|
|
||||||
SetState(ThreadState::Runnable);
|
|
||||||
wait_cancelled = false;
|
wait_cancelled = false;
|
||||||
}
|
wait_queue->CancelWait(this, ResultCancelled, true);
|
||||||
} else {
|
} else {
|
||||||
// Otherwise, note that we cancelled a wait.
|
// Otherwise, note that we cancelled a wait.
|
||||||
wait_cancelled = true;
|
wait_cancelled = true;
|
||||||
|
@ -700,60 +714,59 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
|
||||||
// Set the activity.
|
// Set the activity.
|
||||||
{
|
{
|
||||||
// Lock the scheduler.
|
// Lock the scheduler.
|
||||||
KScopedSchedulerLock sl{kernel};
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
// Verify our state.
|
// Verify our state.
|
||||||
const auto cur_state = GetState();
|
const auto cur_state = this->GetState();
|
||||||
R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
|
R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
|
||||||
ResultInvalidState);
|
ResultInvalidState);
|
||||||
|
|
||||||
// Either pause or resume.
|
// Either pause or resume.
|
||||||
if (activity == Svc::ThreadActivity::Paused) {
|
if (activity == Svc::ThreadActivity::Paused) {
|
||||||
// Verify that we're not suspended.
|
// Verify that we're not suspended.
|
||||||
R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
|
R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
|
||||||
|
|
||||||
// Suspend.
|
// Suspend.
|
||||||
RequestSuspend(SuspendType::Thread);
|
this->RequestSuspend(SuspendType::Thread);
|
||||||
} else {
|
} else {
|
||||||
ASSERT(activity == Svc::ThreadActivity::Runnable);
|
ASSERT(activity == Svc::ThreadActivity::Runnable);
|
||||||
|
|
||||||
// Verify that we're suspended.
|
// Verify that we're suspended.
|
||||||
R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
|
R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
|
||||||
|
|
||||||
// Resume.
|
// Resume.
|
||||||
Resume(SuspendType::Thread);
|
this->Resume(SuspendType::Thread);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the thread is now paused, update the pinned waiter list.
|
     // If the thread is now paused, update the pinned waiter list.
     if (activity == Svc::ThreadActivity::Paused) {
-        bool thread_is_pinned{};
-        bool thread_is_current{};
+        ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
+                                                         std::addressof(pinned_waiter_list));

+        bool thread_is_current;
         do {
             // Lock the scheduler.
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl(kernel);

             // Don't do any further management if our termination has been requested.
-            R_SUCCEED_IF(IsTerminationRequested());
+            R_SUCCEED_IF(this->IsTerminationRequested());

+            // By default, treat the thread as not current.
+            thread_is_current = false;

             // Check whether the thread is pinned.
-            if (GetStackParameters().is_pinned) {
+            if (this->GetStackParameters().is_pinned) {
                 // Verify that the current thread isn't terminating.
                 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                          ResultTerminationRequested);

-                // Note that the thread was pinned and not current.
-                thread_is_pinned = true;
-                thread_is_current = false;

                 // Wait until the thread isn't pinned any more.
                 pinned_waiter_list.push_back(GetCurrentThread(kernel));
-                GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+                GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
             } else {
                 // Check if the thread is currently running.
                 // If it is, we'll need to retry.
-                thread_is_current = false;

                 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
                     if (kernel.Scheduler(i).GetCurrentThread() == this) {
                         thread_is_current = true;

@@ -762,16 +775,6 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
                 }
             }
         } while (thread_is_current);

-        // If the thread was pinned, it no longer is, and we should remove the current thread from
-        // our waiter list.
-        if (thread_is_pinned) {
-            // Lock the scheduler.
-            KScopedSchedulerLock sl{kernel};
-
-            // Remove from the list.
-            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
-        }
     }

     return ResultSuccess;

@@ -966,6 +969,9 @@ ResultCode KThread::Run() {

         // Set our state and finish.
         SetState(ThreadState::Runnable);

+        DisableDispatch();

         return ResultSuccess;
     }
 }
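The SetActivity change above replaces manual pinned-waiter bookkeeping with a wait-queue object. ThreadQueueImplForKThreadSetProperty itself is not part of this excerpt; a plausible sketch, assuming it simply erases the waiter from the list it was handed and then falls back to the default completion logic, is:

// Sketch only: ThreadQueueImplForKThreadSetProperty is referenced but not shown
// in this diff. The member name m_wait_list is illustrative.
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
public:
    explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
        : KThreadQueue(kernel_), m_wait_list(wl) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
        // Remove the waiter from the pinned waiter list it was parked on.
        m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

        // Let the base class wake the thread and clear its wait queue.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }

private:
    KThread::WaiterList* m_wait_list;
};

This is why the old "remove the current thread from our waiter list" block after the loop could be deleted: the queue performs that cleanup when the wait ends.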
@@ -996,29 +1002,63 @@ ResultCode KThread::Sleep(s64 timeout) {
     ASSERT(this == GetCurrentThreadPointer(kernel));
     ASSERT(timeout > 0);

+    ThreadQueueImplForKThreadSleep wait_queue_(kernel);
     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
+        KScopedSchedulerLockAndSleep slp(kernel, this, timeout);

         // Check if the thread should terminate.
-        if (IsTerminationRequested()) {
+        if (this->IsTerminationRequested()) {
             slp.CancelSleep();
             return ResultTerminationRequested;
         }

-        // Mark the thread as waiting.
-        SetState(ThreadState::Waiting);
+        // Wait for the sleep to end.
+        this->BeginWait(std::addressof(wait_queue_));
         SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }

-    // The lock/sleep is done.
-
-    // Cancel the timer.
-    kernel.TimeManager().UnscheduleTimeEvent(this);

     return ResultSuccess;
 }

+void KThread::BeginWait(KThreadQueue* queue) {
+    // Set our state as waiting.
+    SetState(ThreadState::Waiting);
+
+    // Set our wait queue.
+    wait_queue = queue;
+}
+
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
+    // Lock the scheduler.
+    KScopedSchedulerLock sl(kernel);
+
+    // If we're waiting, notify our queue that we're available.
+    if (GetState() == ThreadState::Waiting) {
+        wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
+    }
+}
+
+void KThread::EndWait(ResultCode wait_result_) {
+    // Lock the scheduler.
+    KScopedSchedulerLock sl(kernel);
+
+    // If we're waiting, notify our queue that we're available.
+    if (GetState() == ThreadState::Waiting) {
+        wait_queue->EndWait(this, wait_result_);
+    }
+}
+
+void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
+    // Lock the scheduler.
+    KScopedSchedulerLock sl(kernel);
+
+    // If we're waiting, notify our queue that we're available.
+    if (GetState() == ThreadState::Waiting) {
+        wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
+    }
+}

 void KThread::SetState(ThreadState state) {
     KScopedSchedulerLock sl{kernel};
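With BeginWait/NotifyAvailable/EndWait/CancelWait in place, whoever completes a wait no longer flips thread state directly; it calls one of these entry points and the thread's wait queue decides how to finish. A hedged sketch of the completion side, where 'signaled_object' and 'waiter' are illustrative parameters rather than symbols from this diff:

// Sketch of the waker's side of the new primitives.
void SignalOneWaiter(KSynchronizationObject* signaled_object, KThread* waiter) {
    // Routed through waiter's wait queue only while the thread is still in
    // ThreadState::Waiting, so a stale or duplicate wake-up is harmless.
    waiter->NotifyAvailable(signaled_object, ResultSuccess);
}

// A timeout or cancellation path ends the wait with an error code instead:
//     waiter->CancelWait(ResultTimedOut, /*cancel_timer_task=*/false);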
@@ -1050,4 +1090,26 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }

+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    // If we are shutting down the kernel, none of this is relevant anymore.
+    if (kernel.IsShuttingDown()) {
+        return;
+    }
+
+    // Skip the reschedule if single-core, as dispatch tracking is disabled here.
+    if (!Settings::values.use_multi_core.GetValue()) {
+        return;
+    }
+
+    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
+        auto scheduler = kernel.CurrentScheduler();
+
+        if (scheduler) {
+            scheduler->RescheduleCurrentCore();
+        }
+    } else {
+        GetCurrentThread(kernel).EnableDispatch();
+    }
+}

 } // namespace Kernel
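This destructor pairs with the KScopedDisableDispatch RAII class added to the thread header later in this diff. A typical call site, sketched with a hypothetical function name, keeps dispatch disabled across a critical region and lets the destructor decide between rescheduling and simply dropping the disable count:

// Illustrative use of the RAII helper; the call site is hypothetical.
void DoWorkWithoutBeingRescheduled(KernelCore& kernel) {
    KScopedDisableDispatch dd(kernel);

    // ... touch per-core state that must not be migrated mid-way ...

}   // ~KScopedDisableDispatch: reschedules the current core if this was the
    // outermost disable, otherwise just re-enables dispatch (decrements the count).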
@@ -48,6 +48,7 @@ enum class ThreadType : u32 {
     Kernel = 1,
     HighPriority = 2,
     User = 3,
+    Dummy = 100, // Special thread type for emulation purposes only
 };
 DECLARE_ENUM_FLAG_OPERATORS(ThreadType);

@@ -161,8 +162,6 @@ public:
         }
     }

-    void Wakeup();
-
     void SetBasePriority(s32 value);

     [[nodiscard]] ResultCode Run();

@@ -197,13 +196,19 @@ public:

     void Suspend();

-    void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
-        synced_object = obj;
+    constexpr void SetSyncedIndex(s32 index) {
+        synced_index = index;
+    }
+
+    [[nodiscard]] constexpr s32 GetSyncedIndex() const {
+        return synced_index;
+    }
+
+    constexpr void SetWaitResult(ResultCode wait_res) {
         wait_result = wait_res;
     }

-    [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
-        *out = synced_object;
+    [[nodiscard]] constexpr ResultCode GetWaitResult() const {
         return wait_result;
     }

@@ -374,6 +379,8 @@ public:

     [[nodiscard]] bool IsSignaled() const override;

+    void OnTimer();
+
     static void PostDestroy(uintptr_t arg);

     [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);

@@ -446,20 +453,39 @@ public:
         return per_core_priority_queue_entry[core];
     }

-    void SetSleepingQueue(KThreadQueue* q) {
-        sleeping_queue = q;
+    [[nodiscard]] bool IsKernelThread() const {
+        return GetActiveCore() == 3;
+    }
+
+    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
+        return is_single_core || IsKernelThread();
     }

     [[nodiscard]] s32 GetDisableDispatchCount() const {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return 1;
+        }
+
         return this->GetStackParameters().disable_count;
     }

     void DisableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }

     void EnableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }

@@ -573,6 +599,15 @@ public:
         address_key_value = val;
     }

+    void ClearWaitQueue() {
+        wait_queue = nullptr;
+    }
+
+    void BeginWait(KThreadQueue* queue);
+    void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
+    void EndWait(ResultCode wait_result_);
+    void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
+
     [[nodiscard]] bool HasWaiters() const {
         return !waiter_list.empty();
     }

@@ -667,7 +702,6 @@ private:
     KAffinityMask physical_affinity_mask{};
     u64 thread_id{};
     std::atomic<s64> cpu_time{};
-    KSynchronizationObject* synced_object{};
     VAddr address_key{};
     KProcess* parent{};
     VAddr kernel_stack_top{};

@@ -677,13 +711,14 @@ private:
     s64 schedule_count{};
     s64 last_scheduled_tick{};
     std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
-    KThreadQueue* sleeping_queue{};
+    KThreadQueue* wait_queue{};
     WaiterList waiter_list{};
     WaiterList pinned_waiter_list{};
     KThread* lock_owner{};
     u32 address_key_value{};
     u32 suspend_request_flags{};
     u32 suspend_allowed_flags{};
+    s32 synced_index{};
     ResultCode wait_result{ResultSuccess};
     s32 base_priority{};
     s32 physical_ideal_core_id{};

@@ -708,6 +743,7 @@ private:

     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
+    bool is_single_core{};

     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;

@@ -752,4 +788,20 @@ public:
     }
 };

+class KScopedDisableDispatch {
+public:
+    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (kernel.IsShuttingDown()) {
+            return;
+        }
+        GetCurrentThread(kernel).DisableDispatch();
+    }
+
+    ~KScopedDisableDispatch();
+
+private:
+    KernelCore& kernel;
+};
+
 } // namespace Kernel
src/core/hle/kernel/k_thread_queue.cpp (new executable file, 51 lines)
@@ -0,0 +1,51 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+
+void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
+                                   [[maybe_unused]] KSynchronizationObject* signaled_object,
+                                   [[maybe_unused]] ResultCode wait_result) {}
+
+void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
+    // Set the thread's wait result.
+    waiting_thread->SetWaitResult(wait_result);
+
+    // Set the thread as runnable.
+    waiting_thread->SetState(ThreadState::Runnable);
+
+    // Clear the thread's wait queue.
+    waiting_thread->ClearWaitQueue();
+
+    // Cancel the thread task.
+    kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
+}
+
+void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                              bool cancel_timer_task) {
+    // Set the thread's wait result.
+    waiting_thread->SetWaitResult(wait_result);
+
+    // Set the thread as runnable.
+    waiting_thread->SetState(ThreadState::Runnable);
+
+    // Clear the thread's wait queue.
+    waiting_thread->ClearWaitQueue();
+
+    // Cancel the thread task.
+    if (cancel_timer_task) {
+        kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
+    }
+}
+
+void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
+                                         [[maybe_unused]] ResultCode wait_result) {}
+
+} // namespace Kernel
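Because the default EndWait/CancelWait already handle the state transition, the timer cancellation, and clearing the thread's wait queue, the base KThreadQueue can be used directly when no per-queue bookkeeping is needed; the SendSyncRequest change later in this diff does exactly that. A condensed sketch of the pattern, with a hypothetical function name:

// Minimal wait/complete round trip on the base class (condensed from the
// SendSyncRequest change further down in this diff).
ResultCode WaitForCompletion(KernelCore& kernel) {
    KThreadQueue wait_queue(kernel);

    {
        KScopedSchedulerLock lock(kernel);
        // Park the current thread on the queue before publishing the work item.
        GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
        // ... hand the request to whoever will later call EndWait() on us ...
    }

    // EndWait()/CancelWait() stored the completion code before waking us.
    return GetCurrentThread(kernel).GetWaitResult();
}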
@@ -4,6 +4,7 @@

 #pragma once

+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"

 namespace Kernel {

@@ -11,71 +12,24 @@ namespace Kernel {
 class KThreadQueue {
 public:
     explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
+    virtual ~KThreadQueue() = default;

-    bool IsEmpty() const {
-        return wait_list.empty();
-    }
-
-    KThread::WaiterList::iterator begin() {
-        return wait_list.begin();
-    }
-    KThread::WaiterList::iterator end() {
-        return wait_list.end();
-    }
-
-    bool SleepThread(KThread* t) {
-        KScopedSchedulerLock sl{kernel};
-
-        // If the thread needs terminating, don't enqueue it.
-        if (t->IsTerminationRequested()) {
-            return false;
-        }
-
-        // Set the thread's queue and mark it as waiting.
-        t->SetSleepingQueue(this);
-        t->SetState(ThreadState::Waiting);
-
-        // Add the thread to the queue.
-        wait_list.push_back(*t);
-
-        return true;
-    }
-
-    void WakeupThread(KThread* t) {
-        KScopedSchedulerLock sl{kernel};
-
-        // Remove the thread from the queue.
-        wait_list.erase(wait_list.iterator_to(*t));
-
-        // Mark the thread as no longer sleeping.
-        t->SetState(ThreadState::Runnable);
-        t->SetSleepingQueue(nullptr);
-    }
-
-    KThread* WakeupFrontThread() {
-        KScopedSchedulerLock sl{kernel};
-
-        if (wait_list.empty()) {
-            return nullptr;
-        } else {
-            // Remove the thread from the queue.
-            auto it = wait_list.begin();
-            KThread* thread = std::addressof(*it);
-            wait_list.erase(it);
-
-            ASSERT(thread->GetState() == ThreadState::Waiting);
-
-            // Mark the thread as no longer sleeping.
-            thread->SetState(ThreadState::Runnable);
-            thread->SetSleepingQueue(nullptr);
-
-            return thread;
-        }
-    }
+    virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
+                                 ResultCode wait_result);
+    virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
+    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                            bool cancel_timer_task);

 private:
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };

+class KThreadQueueWithoutEndWait : public KThreadQueue {
+public:
+    explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+
+    virtual void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
+};
+
 } // namespace Kernel
@@ -14,6 +14,7 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
+#include "common/scope_exit.h"
 #include "common/thread.h"
 #include "common/thread_worker.h"
 #include "core/arm/arm_interface.h"

@@ -83,12 +84,16 @@ struct KernelCore::Impl {
     }

     void InitializeCores() {
-        for (auto& core : cores) {
-            core.Initialize(current_process->Is64BitProcess());
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            cores[core_id].Initialize(current_process->Is64BitProcess());
+            system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }

     void Shutdown() {
+        is_shutting_down.store(true, std::memory_order_relaxed);
+        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
+
         process_list.clear();

         // Close all open server ports.

@@ -123,15 +128,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;

-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id].reset();
-        }
-
         cores.clear();

         global_handle_table->Finalize();

@@ -159,6 +155,16 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);

+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id]->Finalize();
+            schedulers[core_id].reset();
+        }
+
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;

@@ -245,13 +251,11 @@ struct KernelCore::Impl {
                 KScopedSchedulerLock lock(kernel);
                 global_scheduler_context->PreemptThreads();
             }
-            const auto time_interval = std::chrono::nanoseconds{
-                Core::Timing::msToCycles(std::chrono::milliseconds(10))};
+            const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
             system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
         });

-        const auto time_interval =
-            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
+        const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     }

@@ -267,14 +271,6 @@ struct KernelCore::Impl {

     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }

     static inline thread_local u32 host_thread_id = UINT32_MAX;

@@ -344,7 +340,16 @@ struct KernelCore::Impl {
         is_phantom_mode_for_singlecore = value;
     }

+    bool IsShuttingDown() const {
+        return is_shutting_down.load(std::memory_order_relaxed);
+    }
+
     KThread* GetCurrentEmuThread() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (IsShuttingDown()) {
+            return {};
+        }
+
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();

@@ -760,6 +765,7 @@ struct KernelCore::Impl {
     std::vector<std::unique_ptr<KThread>> dummy_threads;

     bool is_multicore{};
+    std::atomic_bool is_shutting_down{};
     bool is_phantom_mode_for_singlecore{};
     u32 single_core_thread_id{};
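Shutdown() now raises an atomic flag for the duration of teardown, and any path that resolves the current emulated thread bails out while the flag is set. A standalone sketch of the same guard, where Worker and its members are hypothetical stand-ins rather than yuzu types:

#include <atomic>

// Standalone sketch of the shutdown-guard pattern used above.
class Worker {
public:
    void Shutdown() {
        shutting_down.store(true, std::memory_order_relaxed);
        // Tear down state; concurrent lookups observe the flag and return null.
        // (The kernel clears the flag with SCOPE_EXIT when Shutdown() returns.)
        shutting_down.store(false, std::memory_order_relaxed);
    }

    int* GetCurrentContext() {
        if (shutting_down.load(std::memory_order_relaxed)) {
            return nullptr; // mirrors GetCurrentEmuThread() returning {}
        }
        return &some_state;
    }

private:
    std::atomic_bool shutting_down{};
    int some_state{};
};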
@@ -845,16 +851,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }

+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }

 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }

 Kernel::KScheduler* KernelCore::CurrentScheduler() {

@@ -1057,6 +1067,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }

@@ -1065,19 +1078,21 @@ bool KernelCore::IsMulticore() const {
     return impl->is_multicore;
 }

+bool KernelCore::IsShuttingDown() const {
+    return impl->IsShuttingDown();
+}
+
 void KernelCore::ExceptionalExit() {
     exception_exited = true;
     Suspend(true);
 }

 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }

 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }

 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
@@ -148,6 +148,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;

+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();

@@ -271,6 +274,8 @@ public:

     bool IsMulticore() const;

+    bool IsShuttingDown() const;
+
     void EnterSVCProfile();

     void ExitSVCProfile();
@@ -25,24 +25,27 @@ public:
     void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);

 private:
-    std::vector<std::thread> threads;
+    std::vector<std::jthread> threads;
     std::queue<std::function<void()>> requests;
     std::mutex queue_mutex;
-    std::condition_variable condition;
+    std::condition_variable_any condition;
     const std::string service_name;
-    bool stop{};
 };

 ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
     : service_name{name} {
-    for (std::size_t i = 0; i < num_threads; ++i)
-        threads.emplace_back([this, &kernel] {
+    for (std::size_t i = 0; i < num_threads; ++i) {
+        threads.emplace_back([this, &kernel](std::stop_token stop_token) {
             Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());

             // Wait for first request before trying to acquire a render context
             {
                 std::unique_lock lock{queue_mutex};
-                condition.wait(lock, [this] { return stop || !requests.empty(); });
+                condition.wait(lock, stop_token, [this] { return !requests.empty(); });
+            }
+
+            if (stop_token.stop_requested()) {
+                return;
             }

             kernel.RegisterHostThread();

@@ -52,10 +55,16 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
                 {
                     std::unique_lock lock{queue_mutex};
-                    condition.wait(lock, [this] { return stop || !requests.empty(); });
-                    if (stop || requests.empty()) {
+                    condition.wait(lock, stop_token, [this] { return !requests.empty(); });
+
+                    if (stop_token.stop_requested()) {
                         return;
                     }
+
+                    if (requests.empty()) {
+                        continue;
+                    }
+
                     task = std::move(requests.front());
                     requests.pop();
                 }

@@ -63,6 +72,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
                 task();
             }
         });
+    }
 }

 void ServiceThread::Impl::QueueSyncRequest(KSession& session,

@@ -87,16 +97,7 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
     condition.notify_one();
 }

-ServiceThread::Impl::~Impl() {
-    {
-        std::unique_lock lock{queue_mutex};
-        stop = true;
-    }
-    condition.notify_all();
-    for (std::thread& thread : threads) {
-        thread.join();
-    }
-}
+ServiceThread::Impl::~Impl() = default;

 ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
     : impl{std::make_unique<Impl>(kernel, num_threads, name)} {}
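The service-thread change swaps std::thread plus a manual stop flag for std::jthread, whose destructor requests stop and joins, together with std::condition_variable_any, whose stop_token-aware wait wakes when stop is requested. A minimal standalone C++20 worker showing the same pattern (Worker here is an illustrative class, not the yuzu implementation):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

// Minimal worker mirroring the jthread/stop_token pattern above.
class Worker {
public:
    Worker() : thread([this](std::stop_token stop_token) {
        for (;;) {
            std::function<void()> task;
            {
                std::unique_lock lock{mutex};
                // Wakes (returning false) as soon as stop is requested.
                condition.wait(lock, stop_token, [this] { return !tasks.empty(); });
                if (stop_token.stop_requested()) {
                    return;
                }
                if (tasks.empty()) {
                    continue;
                }
                task = std::move(tasks.front());
                tasks.pop();
            }
            task();
        }
    }) {}

    void Push(std::function<void()> task) {
        {
            std::scoped_lock lock{mutex};
            tasks.push(std::move(task));
        }
        condition.notify_one();
    }

    // No user-written destructor needed: ~jthread() requests stop and joins,
    // which is why ServiceThread::Impl::~Impl() can become "= default".

private:
    std::mutex mutex;
    std::condition_variable_any condition;
    std::queue<std::function<void()>> tasks;
    std::jthread thread; // declared last so the other members outlive the thread
};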
@@ -31,6 +31,7 @@
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/k_transfer_memory.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"

@@ -307,26 +308,29 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,

 /// Makes a blocking IPC call to an OS service.
 static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     auto& kernel = system.Kernel();

+    // Create the wait queue.
+    KThreadQueue wait_queue(kernel);
+
+    // Get the client session from its handle.
+    KScopedAutoObject session =
+        kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
+    R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
+
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
         KScopedSchedulerLock lock(kernel);
-        thread->SetState(ThreadState::Waiting);
-        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-
-        {
-            KScopedAutoObject session =
-                kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
-            R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-            LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
-            session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
-        }
+
+        // This is a synchronous request, so we should wait for our request to complete.
+        GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
+        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+        session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
     }

-    KSynchronizationObject* dummy{};
-    return thread->GetWaitResult(std::addressof(dummy));
+    return thread->GetWaitResult();
 }

 static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {

@@ -873,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
         const u64 thread_ticks = current_thread->GetCpuTime();

         out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-    } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+    } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
         out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
     }

@@ -887,7 +891,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
         return ResultInvalidHandle;
     }

-    if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) {
+    if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
+        info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
         LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
         return ResultInvalidCombination;
     }
@@ -5,6 +5,7 @@
 #include "common/assert.h"
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/time_manager.h"

@@ -15,7 +16,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
         Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
                                   [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
                                       KThread* thread = reinterpret_cast<KThread*>(thread_handle);
-                                      thread->Wakeup();
+                                      {
+                                          KScopedSchedulerLock sl(system.Kernel());
+                                          thread->OnTimer();
+                                      }
                                   });
 }
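The timer callback now takes the scheduler lock and calls the new KThread::OnTimer() instead of Wakeup(). OnTimer() is only declared in this diff; given the wait machinery above, a plausible shape for it, offered purely as an assumption, is:

// Sketch only: KThread::OnTimer() is declared in this diff but its body is not
// shown. ResultTimedOut is assumed to be the timeout result code.
void KThread::OnTimer() {
    // The caller (the TimeManager callback above) already holds the scheduler lock.

    // If we're still waiting, cancel the wait; the timer has already fired, so
    // there is no pending timer task left to cancel.
    if (GetState() == ThreadState::Waiting) {
        wait_queue->CancelWait(this, ResultTimedOut, /*cancel_timer_task=*/false);
    }
}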
@@ -430,7 +430,7 @@ void EmitSetSampleMask(EmitContext& ctx, Id value) {
 }

 void EmitSetFragDepth(EmitContext& ctx, Id value) {
-    if (!ctx.runtime_info.convert_depth_mode || ctx.profile.support_native_ndc) {
+    if (!ctx.runtime_info.convert_depth_mode) {
         ctx.OpStore(ctx.frag_depth, value);
         return;
     }

@@ -116,8 +116,7 @@ void EmitPrologue(EmitContext& ctx) {
 }

 void EmitEpilogue(EmitContext& ctx) {
-    if (ctx.stage == Stage::VertexB && ctx.runtime_info.convert_depth_mode &&
-        !ctx.profile.support_native_ndc) {
+    if (ctx.stage == Stage::VertexB && ctx.runtime_info.convert_depth_mode) {
         ConvertDepthMode(ctx);
     }
     if (ctx.stage == Stage::Fragment) {

@@ -126,7 +125,7 @@ void EmitEpilogue(EmitContext& ctx) {
 }

 void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
-    if (ctx.runtime_info.convert_depth_mode && !ctx.profile.support_native_ndc) {
+    if (ctx.runtime_info.convert_depth_mode) {
         ConvertDepthMode(ctx);
     }
     if (stream.IsImmediate()) {

@@ -36,7 +36,6 @@ struct Profile {
     bool support_int64_atomics{};
     bool support_derivative_control{};
     bool support_geometry_shader_passthrough{};
-    bool support_native_ndc{};
     bool support_gl_nv_gpu_shader_5{};
     bool support_gl_amd_gpu_shader_half_float{};
     bool support_gl_texture_shadow_lod{};
@@ -9,6 +9,7 @@
 #include <chrono>
 #include <cstdlib>
 #include <memory>
+#include <mutex>
 #include <string>

 #include "core/core.h"

@@ -22,9 +23,11 @@ std::array<s64, 5> delays{};

 std::bitset<CB_IDS.size()> callbacks_ran_flags;
 u64 expected_callback = 0;
+std::mutex control_mutex;

 template <unsigned int IDX>
 void HostCallbackTemplate(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+    std::unique_lock<std::mutex> lk(control_mutex);
     static_assert(IDX < CB_IDS.size(), "IDX out of range");
     callbacks_ran_flags.set(IDX);
     REQUIRE(CB_IDS[IDX] == user_data);

@@ -194,7 +194,6 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
         .support_int64_atomics = false,
         .support_derivative_control = device.HasDerivativeControl(),
         .support_geometry_shader_passthrough = device.HasGeometryShaderPassthrough(),
-        .support_native_ndc = true,
         .support_gl_nv_gpu_shader_5 = device.HasNvGpuShader5(),
         .support_gl_amd_gpu_shader_half_float = device.HasAmdShaderHalfFloat(),
         .support_gl_texture_shadow_lod = device.HasTextureShadowLod(),
@@ -613,35 +613,26 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
         .flags = 0,
         .patchControlPoints = key.state.patch_control_points_minus_one.Value() + 1,
     };

     std::array<VkViewportSwizzleNV, Maxwell::NumViewports> swizzles;
     std::ranges::transform(key.state.viewport_swizzles, swizzles.begin(), UnpackViewportSwizzle);
-    VkPipelineViewportSwizzleStateCreateInfoNV swizzle_ci{
+    const VkPipelineViewportSwizzleStateCreateInfoNV swizzle_ci{
         .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV,
         .pNext = nullptr,
         .flags = 0,
         .viewportCount = Maxwell::NumViewports,
         .pViewportSwizzles = swizzles.data(),
     };
-    VkPipelineViewportDepthClipControlCreateInfoEXT ndc_info{
-        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT,
-        .pNext = nullptr,
-        .negativeOneToOne = key.state.ndc_minus_one_to_one.Value() != 0 ? VK_TRUE : VK_FALSE,
-    };
-    VkPipelineViewportStateCreateInfo viewport_ci{
+    const VkPipelineViewportStateCreateInfo viewport_ci{
         .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
-        .pNext = nullptr,
+        .pNext = device.IsNvViewportSwizzleSupported() ? &swizzle_ci : nullptr,
         .flags = 0,
         .viewportCount = Maxwell::NumViewports,
         .pViewports = nullptr,
         .scissorCount = Maxwell::NumViewports,
         .pScissors = nullptr,
     };
-    if (device.IsNvViewportSwizzleSupported()) {
-        swizzle_ci.pNext = std::exchange(viewport_ci.pNext, &swizzle_ci);
-    }
-    if (device.IsExtDepthClipControlSupported()) {
-        ndc_info.pNext = std::exchange(viewport_ci.pNext, &ndc_info);
-    }
     VkPipelineRasterizationStateCreateInfo rasterization_ci{
         .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
         .pNext = nullptr,
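With the depth-clip-control struct gone, only one optional extension struct remains, so a const initializer with a ternary pNext is enough. When several optional structs have to be linked, the removed std::exchange idiom is the general form; a self-contained sketch of that prepend pattern, using core Vulkan types and an illustrative feature flag:

#include <cstdint>
#include <utility>
#include <vulkan/vulkan.h>

// Generic pNext chaining: prepend each optional extension struct that applies.
// 'supports_viewport_swizzle' and the counts are illustrative only.
void BuildViewportChain(bool supports_viewport_swizzle,
                        const VkViewportSwizzleNV* swizzles, uint32_t count) {
    VkPipelineViewportSwizzleStateCreateInfoNV swizzle_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV,
        .pNext = nullptr,
        .flags = 0,
        .viewportCount = count,
        .pViewportSwizzles = swizzles,
    };
    VkPipelineViewportStateCreateInfo viewport_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .viewportCount = count,
        .pViewports = nullptr,
        .scissorCount = count,
        .pScissors = nullptr,
    };
    if (supports_viewport_swizzle) {
        // Prepend: the extension struct takes over the previous head of the chain.
        swizzle_ci.pNext = std::exchange(viewport_ci.pNext, &swizzle_ci);
    }
    // viewport_ci would then feed VkGraphicsPipelineCreateInfo::pViewportState.
    (void)viewport_ci;
}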
@@ -311,7 +311,6 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxw
         .support_int64_atomics = device.IsExtShaderAtomicInt64Supported(),
         .support_derivative_control = true,
         .support_geometry_shader_passthrough = device.IsNvGeometryShaderPassthroughSupported(),
-        .support_native_ndc = device.IsExtDepthClipControlSupported(),

         .warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),

@@ -556,16 +556,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
         LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
     }

-    VkPhysicalDeviceDepthClipControlFeaturesEXT depth_clip_control_features;
-    if (ext_depth_clip_control) {
-        depth_clip_control_features = {
-            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT,
-            .pNext = nullptr,
-            .depthClipControl = VK_TRUE,
-        };
-        SetNext(next, depth_clip_control_features);
-    }
-
     VkDeviceDiagnosticsConfigCreateInfoNV diagnostics_nv;
     if (Settings::values.enable_nsight_aftermath && nv_device_diagnostics_config) {
         nsight_aftermath_tracker = std::make_unique<NsightAftermathTracker>();

@@ -900,7 +890,6 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
     bool has_ext_provoking_vertex{};
     bool has_ext_vertex_input_dynamic_state{};
     bool has_ext_line_rasterization{};
-    bool has_ext_depth_clip_control{};
     for (const std::string& extension : supported_extensions) {
         const auto test = [&](std::optional<std::reference_wrapper<bool>> status, const char* name,
                               bool push) {

@@ -932,7 +921,6 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
         test(ext_shader_stencil_export, VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME, true);
         test(ext_conservative_rasterization, VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME,
              true);
-        test(has_ext_depth_clip_control, VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME, false);
         test(has_ext_transform_feedback, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME, false);
         test(has_ext_custom_border_color, VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME, false);
         test(has_ext_extended_dynamic_state, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME, false);

@@ -1095,19 +1083,6 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
             ext_line_rasterization = true;
         }
     }
-    if (has_ext_depth_clip_control) {
-        VkPhysicalDeviceDepthClipControlFeaturesEXT depth_clip_control_features;
-        depth_clip_control_features.sType =
-            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT;
-        depth_clip_control_features.pNext = nullptr;
-        features.pNext = &depth_clip_control_features;
-        physical.GetFeatures2KHR(features);
-
-        if (depth_clip_control_features.depthClipControl) {
-            extensions.push_back(VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME);
-            ext_depth_clip_control = true;
-        }
-    }
     if (has_khr_workgroup_memory_explicit_layout) {
         VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR layout;
         layout.sType =
@@ -253,11 +253,6 @@ public:
         return ext_depth_range_unrestricted;
     }

-    /// Returns true if the device supports VK_EXT_depth_clip_control.
-    bool IsExtDepthClipControlSupported() const {
-        return ext_depth_clip_control;
-    }
-
     /// Returns true if the device supports VK_EXT_shader_viewport_index_layer.
     bool IsExtShaderViewportIndexLayerSupported() const {
         return ext_shader_viewport_index_layer;

@@ -417,7 +412,6 @@ private:
     bool khr_swapchain_mutable_format{};    ///< Support for VK_KHR_swapchain_mutable_format.
     bool ext_index_type_uint8{};            ///< Support for VK_EXT_index_type_uint8.
     bool ext_sampler_filter_minmax{};       ///< Support for VK_EXT_sampler_filter_minmax.
-    bool ext_depth_clip_control{};          ///< Support for VK_EXT_depth_clip_control
     bool ext_depth_range_unrestricted{};    ///< Support for VK_EXT_depth_range_unrestricted.
     bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
     bool ext_tooling_info{};                ///< Support for VK_EXT_tooling_info.