Diffstat (limited to 'src'): 117 files changed, 9450 insertions(+), 6501 deletions(-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index d7f68618c..d2ca4904a 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -21,7 +21,7 @@ if (MSVC)
     # Avoid windows.h from including some usually unused libs like winsocks.h, since this might cause some redefinition errors.
     add_definitions(-DWIN32_LEAN_AND_MEAN)
 
-    # Ensure that projects build with Unicode support.
+    # Ensure that projects are built with Unicode support.
     add_definitions(-DUNICODE -D_UNICODE)
 
     # /W4 - Level 4 warnings
@@ -54,11 +54,11 @@ if (MSVC)
         /GT
 
         # Modules
-        /experimental:module- # Disable module support explicitly due to conflicts with precompiled headers
+        /experimental:module- # Explicitly disable module support due to conflicts with precompiled headers.
 
         # External headers diagnostics
         /external:anglebrackets # Treats all headers included by #include <header>, where the header file is enclosed in angle brackets (< >), as external headers
-        /external:W0 # Sets the default warning level to 0 for external headers, effectively turning off warnings for external headers
+        /external:W0 # Sets the default warning level to 0 for external headers, effectively disabling warnings for them.
 
         # Warnings
         /W4
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt
index 08e2a973d..2bf0e1b0d 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/Settings.kt
@@ -82,7 +82,6 @@ object Settings {
 
     enum class MenuTag(val titleId: Int) {
         SECTION_ROOT(R.string.advanced_settings),
-        SECTION_GENERAL(R.string.preferences_general),
         SECTION_SYSTEM(R.string.preferences_system),
         SECTION_RENDERER(R.string.preferences_graphics),
         SECTION_AUDIO(R.string.preferences_audio),
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt
index 522cc49df..425160024 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/RunnableSetting.kt
@@ -3,10 +3,13 @@
 
 package org.yuzu.yuzu_emu.features.settings.model.view
 
+import androidx.annotation.DrawableRes
+
 class RunnableSetting(
     titleId: Int,
     descriptionId: Int,
     val isRuntimeRunnable: Boolean,
+    @DrawableRes val iconId: Int = 0,
     val runnable: () -> Unit
 ) : SettingsItem(emptySetting, titleId, descriptionId) {
     override val type = TYPE_RUNNABLE
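Note: the new @DrawableRes iconId parameter defaults to 0, so existing call sites compile unchanged, and 0 doubles as the "no icon" sentinel that RunnableViewHolder checks further down. A sketch of a call site that passes an icon, using the resource names introduced in SettingsFragmentPresenter below:

    // Sketch: the trailing lambda binds to `runnable`; iconId falls back to 0 when omitted.
    val resetSetting = RunnableSetting(
        R.string.reset_to_default,
        R.string.reset_to_default_description,
        false, // isRuntimeRunnable
        R.drawable.ic_restore
    ) { settingsViewModel.setShouldShowResetSettingsDialog(true) }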
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt
index b343e527e..94953b18a 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/model/view/SubmenuSetting.kt
@@ -3,11 +3,14 @@
 
 package org.yuzu.yuzu_emu.features.settings.model.view
 
+import androidx.annotation.DrawableRes
+import androidx.annotation.StringRes
 import org.yuzu.yuzu_emu.features.settings.model.Settings
 
 class SubmenuSetting(
-    titleId: Int,
-    descriptionId: Int,
+    @StringRes titleId: Int,
+    @StringRes descriptionId: Int,
+    @DrawableRes val iconId: Int,
     val menuKey: Settings.MenuTag
 ) : SettingsItem(emptySetting, titleId, descriptionId) {
     override val type = TYPE_SUBMENU
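Note: unlike RunnableSetting, iconId is mandatory here, and the @StringRes annotations let lint verify that callers pass string resources for the title and description. Every submenu built in SettingsFragmentPresenter below now follows this shape:

    // Shape of the new call sites (taken from the presenter change below).
    add(
        SubmenuSetting(
            R.string.preferences_system,
            R.string.preferences_system_description,
            R.drawable.ic_system_settings,
            Settings.MenuTag.SECTION_SYSTEM
        )
    )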
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt
index 70d8ec14b..769baf744 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragment.kt
@@ -20,7 +20,6 @@ import androidx.lifecycle.repeatOnLifecycle
 import androidx.navigation.findNavController
 import androidx.navigation.fragment.navArgs
 import androidx.recyclerview.widget.LinearLayoutManager
-import com.google.android.material.divider.MaterialDividerItemDecoration
 import com.google.android.material.transition.MaterialSharedAxis
 import kotlinx.coroutines.flow.collectLatest
 import kotlinx.coroutines.launch
@@ -68,15 +67,9 @@ class SettingsFragment : Fragment() {
         )
 
         binding.toolbarSettingsLayout.title = getString(args.menuTag.titleId)
-        val dividerDecoration = MaterialDividerItemDecoration(
-            requireContext(),
-            LinearLayoutManager.VERTICAL
-        )
-        dividerDecoration.isLastItemDecorated = false
         binding.listSettings.apply {
             adapter = settingsAdapter
             layoutManager = LinearLayoutManager(requireContext())
-            addItemDecoration(dividerDecoration)
         }
 
         binding.toolbarSettings.setNavigationOnClickListener {
@@ -94,17 +87,6 @@ class SettingsFragment : Fragment() {
                     }
                 }
             }
-            launch {
-                settingsViewModel.isUsingSearch.collectLatest {
-                    if (it) {
-                        reenterTransition = MaterialSharedAxis(MaterialSharedAxis.Z, true)
-                        exitTransition = MaterialSharedAxis(MaterialSharedAxis.Z, false)
-                    } else {
-                        reenterTransition = MaterialSharedAxis(MaterialSharedAxis.X, false)
-                        exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
-                    }
-                }
-            }
         }
 
         if (args.menuTag == Settings.MenuTag.SECTION_ROOT) {
@@ -112,8 +94,6 @@ class SettingsFragment : Fragment() {
             binding.toolbarSettings.setOnMenuItemClickListener {
                 when (it.itemId) {
                     R.id.action_search -> {
-                        reenterTransition = MaterialSharedAxis(MaterialSharedAxis.Z, true)
-                        exitTransition = MaterialSharedAxis(MaterialSharedAxis.Z, false)
                         view.findNavController()
                             .navigate(R.id.action_settingsFragment_to_settingsSearchFragment)
                         true
@@ -129,11 +109,6 @@ class SettingsFragment : Fragment() {
         setInsets()
     }
 
-    override fun onResume() {
-        super.onResume()
-        settingsViewModel.setIsUsingSearch(false)
-    }
-
    private fun setInsets() {
        ViewCompat.setOnApplyWindowInsetsListener(
            binding.root
@@ -144,10 +119,9 @@ class SettingsFragment : Fragment() {
            val leftInsets = barInsets.left + cutoutInsets.left
            val rightInsets = barInsets.right + cutoutInsets.right
 
-            val sideMargin = resources.getDimensionPixelSize(R.dimen.spacing_medlarge)
            val mlpSettingsList = binding.listSettings.layoutParams as MarginLayoutParams
-            mlpSettingsList.leftMargin = sideMargin + leftInsets
-            mlpSettingsList.rightMargin = sideMargin + rightInsets
+            mlpSettingsList.leftMargin = leftInsets
+            mlpSettingsList.rightMargin = rightInsets
            binding.listSettings.layoutParams = mlpSettingsList
            binding.listSettings.updatePadding(
                bottom = barInsets.bottom
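Note: with the divider decoration and the fixed spacing_medlarge side margin removed, the settings list is now inset purely by the system bars and the display cutout. Condensed, the listener follows this standard AndroidX pattern (same names as in the fragment; a sketch, not a verbatim excerpt):

    ViewCompat.setOnApplyWindowInsetsListener(binding.root) { _, windowInsets ->
        val barInsets = windowInsets.getInsets(WindowInsetsCompat.Type.systemBars())
        val cutoutInsets = windowInsets.getInsets(WindowInsetsCompat.Type.displayCutout())
        val mlp = binding.listSettings.layoutParams as MarginLayoutParams
        mlp.leftMargin = barInsets.left + cutoutInsets.left
        mlp.rightMargin = barInsets.right + cutoutInsets.right
        binding.listSettings.layoutParams = mlp
        binding.listSettings.updatePadding(bottom = barInsets.bottom)
        windowInsets
    }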
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt
index 766414a6c..8b71e32f3 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/SettingsFragmentPresenter.kt
@@ -3,7 +3,6 @@
 
 package org.yuzu.yuzu_emu.features.settings.ui
 
-import android.content.Context
 import android.content.SharedPreferences
 import android.os.Build
 import android.widget.Toast
@@ -32,8 +31,6 @@ class SettingsFragmentPresenter(
     private val preferences: SharedPreferences
         get() = PreferenceManager.getDefaultSharedPreferences(YuzuApplication.appContext)
 
-    private val context: Context get() = YuzuApplication.appContext
-
     // Extension for populating settings list based on paired settings
     fun ArrayList<SettingsItem>.add(key: String) {
         val item = SettingsItem.settingsItems[key]!!
@@ -53,7 +50,6 @@ class SettingsFragmentPresenter(
         val sl = ArrayList<SettingsItem>()
         when (menuTag) {
             Settings.MenuTag.SECTION_ROOT -> addConfigSettings(sl)
-            Settings.MenuTag.SECTION_GENERAL -> addGeneralSettings(sl)
             Settings.MenuTag.SECTION_SYSTEM -> addSystemSettings(sl)
             Settings.MenuTag.SECTION_RENDERER -> addGraphicsSettings(sl)
             Settings.MenuTag.SECTION_AUDIO -> addAudioSettings(sl)
@@ -75,30 +71,53 @@ class SettingsFragmentPresenter(
 
     private fun addConfigSettings(sl: ArrayList<SettingsItem>) {
         sl.apply {
-            add(SubmenuSetting(R.string.preferences_general, 0, Settings.MenuTag.SECTION_GENERAL))
-            add(SubmenuSetting(R.string.preferences_system, 0, Settings.MenuTag.SECTION_SYSTEM))
-            add(SubmenuSetting(R.string.preferences_graphics, 0, Settings.MenuTag.SECTION_RENDERER))
-            add(SubmenuSetting(R.string.preferences_audio, 0, Settings.MenuTag.SECTION_AUDIO))
-            add(SubmenuSetting(R.string.preferences_debug, 0, Settings.MenuTag.SECTION_DEBUG))
             add(
-                RunnableSetting(R.string.reset_to_default, 0, false) {
-                    settingsViewModel.setShouldShowResetSettingsDialog(true)
-                }
+                SubmenuSetting(
+                    R.string.preferences_system,
+                    R.string.preferences_system_description,
+                    R.drawable.ic_system_settings,
+                    Settings.MenuTag.SECTION_SYSTEM
+                )
+            )
+            add(
+                SubmenuSetting(
+                    R.string.preferences_graphics,
+                    R.string.preferences_graphics_description,
+                    R.drawable.ic_graphics,
+                    Settings.MenuTag.SECTION_RENDERER
+                )
+            )
+            add(
+                SubmenuSetting(
+                    R.string.preferences_audio,
+                    R.string.preferences_audio_description,
+                    R.drawable.ic_audio,
+                    Settings.MenuTag.SECTION_AUDIO
+                )
+            )
+            add(
+                SubmenuSetting(
+                    R.string.preferences_debug,
+                    R.string.preferences_debug_description,
+                    R.drawable.ic_code,
+                    Settings.MenuTag.SECTION_DEBUG
+                )
+            )
+            add(
+                RunnableSetting(
+                    R.string.reset_to_default,
+                    R.string.reset_to_default_description,
+                    false,
+                    R.drawable.ic_restore
+                ) { settingsViewModel.setShouldShowResetSettingsDialog(true) }
             )
         }
     }
 
-    private fun addGeneralSettings(sl: ArrayList<SettingsItem>) {
+    private fun addSystemSettings(sl: ArrayList<SettingsItem>) {
         sl.apply {
             add(BooleanSetting.RENDERER_USE_SPEED_LIMIT.key)
             add(ShortSetting.RENDERER_SPEED_LIMIT.key)
-            add(IntSetting.CPU_ACCURACY.key)
-            add(BooleanSetting.PICTURE_IN_PICTURE.key)
-        }
-    }
-
-    private fun addSystemSettings(sl: ArrayList<SettingsItem>) {
-        sl.apply {
             add(BooleanSetting.USE_DOCKED_MODE.key)
             add(IntSetting.REGION_INDEX.key)
             add(IntSetting.LANGUAGE_INDEX.key)
@@ -116,6 +135,7 @@ class SettingsFragmentPresenter(
         add(IntSetting.RENDERER_ANTI_ALIASING.key)
         add(IntSetting.RENDERER_SCREEN_LAYOUT.key)
         add(IntSetting.RENDERER_ASPECT_RATIO.key)
+        add(BooleanSetting.PICTURE_IN_PICTURE.key)
         add(BooleanSetting.RENDERER_USE_DISK_SHADER_CACHE.key)
         add(BooleanSetting.RENDERER_FORCE_MAX_CLOCK.key)
         add(BooleanSetting.RENDERER_ASYNCHRONOUS_SHADERS.key)
@@ -249,6 +269,7 @@ class SettingsFragmentPresenter(
         add(BooleanSetting.RENDERER_DEBUG.key)
 
         add(HeaderSetting(R.string.cpu))
+        add(IntSetting.CPU_ACCURACY.key)
         add(BooleanSetting.CPU_DEBUG_MODE.key)
         add(SettingsItem.FASTMEM_COMBINED)
     }
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt
index 83a2e94f1..036195624 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/RunnableViewHolder.kt
@@ -4,6 +4,7 @@
 package org.yuzu.yuzu_emu.features.settings.ui.viewholder
 
 import android.view.View
+import androidx.core.content.res.ResourcesCompat
 import org.yuzu.yuzu_emu.NativeLibrary
 import org.yuzu.yuzu_emu.databinding.ListItemSettingBinding
 import org.yuzu.yuzu_emu.features.settings.model.view.RunnableSetting
@@ -16,6 +17,19 @@ class RunnableViewHolder(val binding: ListItemSettingBinding, adapter: SettingsA
 
     override fun bind(item: SettingsItem) {
         setting = item as RunnableSetting
+        if (item.iconId != 0) {
+            binding.icon.visibility = View.VISIBLE
+            binding.icon.setImageDrawable(
+                ResourcesCompat.getDrawable(
+                    binding.icon.resources,
+                    item.iconId,
+                    binding.icon.context.theme
+                )
+            )
+        } else {
+            binding.icon.visibility = View.GONE
+        }
+
         binding.textSettingName.setText(item.nameId)
         if (item.descriptionId != 0) {
             binding.textSettingDescription.setText(item.descriptionId)
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt
index 1cf581a9d..8100c65dd 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/features/settings/ui/viewholder/SubmenuViewHolder.kt
@@ -4,6 +4,7 @@
 package org.yuzu.yuzu_emu.features.settings.ui.viewholder
 
 import android.view.View
+import androidx.core.content.res.ResourcesCompat
 import org.yuzu.yuzu_emu.databinding.ListItemSettingBinding
 import org.yuzu.yuzu_emu.features.settings.model.view.SettingsItem
 import org.yuzu.yuzu_emu.features.settings.model.view.SubmenuSetting
@@ -15,6 +16,19 @@ class SubmenuViewHolder(val binding: ListItemSettingBinding, adapter: SettingsAd
 
     override fun bind(item: SettingsItem) {
         this.item = item as SubmenuSetting
+        if (item.iconId != 0) {
+            binding.icon.visibility = View.VISIBLE
+            binding.icon.setImageDrawable(
+                ResourcesCompat.getDrawable(
+                    binding.icon.resources,
+                    item.iconId,
+                    binding.icon.context.theme
+                )
+            )
+        } else {
+            binding.icon.visibility = View.GONE
+        }
+
         binding.textSettingName.setText(item.nameId)
         if (item.descriptionId != 0) {
             binding.textSettingDescription.setText(item.descriptionId)
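Note: both view holders now bind the optional icon with identical logic, treating iconId == 0 as "no icon". If more row types gain icons, the duplication could collapse into a single extension; a hypothetical sketch, not part of this change:

    // Hypothetical helper (assumes android.view.View, android.widget.ImageView,
    // androidx.annotation.DrawableRes, androidx.core.content.res.ResourcesCompat).
    fun ImageView.setOptionalIcon(@DrawableRes iconId: Int) {
        if (iconId != 0) {
            visibility = View.VISIBLE
            setImageDrawable(ResourcesCompat.getDrawable(resources, iconId, context.theme))
        } else {
            visibility = View.GONE
        }
    }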
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt
index 2ff827c6b..a1620fbb7 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/AboutFragment.kt
@@ -114,10 +114,10 @@ class AboutFragment : Fragment() {
            val leftInsets = barInsets.left + cutoutInsets.left
            val rightInsets = barInsets.right + cutoutInsets.right
 
-            val mlpAppBar = binding.appbarAbout.layoutParams as MarginLayoutParams
-            mlpAppBar.leftMargin = leftInsets
-            mlpAppBar.rightMargin = rightInsets
-            binding.appbarAbout.layoutParams = mlpAppBar
+            val mlpToolbar = binding.toolbarAbout.layoutParams as MarginLayoutParams
+            mlpToolbar.leftMargin = leftInsets
+            mlpToolbar.rightMargin = rightInsets
+            binding.toolbarAbout.layoutParams = mlpToolbar
 
            val mlpScrollAbout = binding.scrollAbout.layoutParams as MarginLayoutParams
            mlpScrollAbout.leftMargin = leftInsets
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
index c456c0592..c32fa0d7e 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/EmulationFragment.kt
@@ -10,7 +10,6 @@ import android.content.DialogInterface
 import android.content.SharedPreferences
 import android.content.pm.ActivityInfo
 import android.content.res.Configuration
-import android.graphics.Color
 import android.net.Uri
 import android.os.Bundle
 import android.os.Handler
@@ -155,7 +154,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
         }
 
         binding.surfaceEmulation.holder.addCallback(this)
-        binding.showFpsText.setTextColor(Color.YELLOW)
         binding.doneControlConfig.setOnClickListener { stopConfiguringControls() }
 
         binding.drawerLayout.addDrawerListener(object : DrawerListener {
@@ -414,12 +412,12 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
         val FRAMETIME = 2
         val SPEED = 3
         perfStatsUpdater = {
-            if (emulationViewModel.emulationStarted.value == true) {
+            if (emulationViewModel.emulationStarted.value) {
                 val perfStats = NativeLibrary.getPerfStats()
-                if (perfStats[FPS] > 0 && _binding != null) {
+                if (_binding != null) {
                     binding.showFpsText.text = String.format("FPS: %.1f", perfStats[FPS])
                 }
-                perfStatsUpdateHandler.postDelayed(perfStatsUpdater!!, 100)
+                perfStatsUpdateHandler.postDelayed(perfStatsUpdater!!, 800)
             }
         }
         perfStatsUpdateHandler.post(perfStatsUpdater!!)
@@ -464,7 +462,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
             if (it.orientation == FoldingFeature.Orientation.HORIZONTAL) {
                 // Restrict emulation and overlays to the top of the screen
                 binding.emulationContainer.layoutParams.height = it.bounds.top
-                binding.overlayContainer.layoutParams.height = it.bounds.top
                 // Restrict input and menu drawer to the bottom of the screen
                 binding.inputContainer.layoutParams.height = it.bounds.bottom
                 binding.inGameMenu.layoutParams.height = it.bounds.bottom
@@ -478,7 +475,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
         if (!isFolding) {
             binding.emulationContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
             binding.inputContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
-            binding.overlayContainer.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
             binding.inGameMenu.layoutParams.height = ViewGroup.LayoutParams.MATCH_PARENT
             isInFoldableLayout = false
             updateOrientation()
@@ -486,7 +482,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
         }
         binding.emulationContainer.requestLayout()
         binding.inputContainer.requestLayout()
-        binding.overlayContainer.requestLayout()
         binding.inGameMenu.requestLayout()
     }
 
@@ -712,24 +707,6 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
         }
 
         v.setPadding(left, cutInsets.top, right, 0)
-
-        // Ensure FPS text doesn't get cut off by rounded display corners
-        val sidePadding = resources.getDimensionPixelSize(R.dimen.spacing_xtralarge)
-        if (cutInsets.left == 0) {
-            binding.showFpsText.setPadding(
-                sidePadding,
-                cutInsets.top,
-                cutInsets.right,
-                cutInsets.bottom
-            )
-        } else {
-            binding.showFpsText.setPadding(
-                cutInsets.left,
-                cutInsets.top,
-                cutInsets.right,
-                cutInsets.bottom
-            )
-        }
         windowInsets
     }
 }
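Two behavioral notes on the perf-stats hunk: removing the perfStats[FPS] > 0 guard lets the overlay draw from the first sample onward, and the 100 ms to 800 ms interval change pairs with the native change below, where each getPerfStats() call now resets the counters, so every poll reports an average over the window since the previous poll. The updater is the usual self-rescheduling Handler callback; a standalone sketch of that pattern (stillRunning and refreshOverlay are illustrative stand-ins):

    // Self-rescheduling poller: stops as soon as the running check fails.
    val handler = Handler(Looper.getMainLooper())
    var poller: Runnable? = null
    poller = Runnable {
        if (stillRunning()) {   // e.g. emulationViewModel.emulationStarted.value
            refreshOverlay()    // e.g. update binding.showFpsText
            handler.postDelayed(poller!!, 800)
        }
    }
    handler.post(poller!!)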
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt
index 9d0594c6e..f95d545bf 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/fragments/SettingsSearchFragment.kt
@@ -40,8 +40,10 @@ class SettingsSearchFragment : Fragment() {
 
     override fun onCreate(savedInstanceState: Bundle?) {
         super.onCreate(savedInstanceState)
-        enterTransition = MaterialSharedAxis(MaterialSharedAxis.Z, false)
-        returnTransition = MaterialSharedAxis(MaterialSharedAxis.Z, true)
+        enterTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
+        returnTransition = MaterialSharedAxis(MaterialSharedAxis.X, false)
+        reenterTransition = MaterialSharedAxis(MaterialSharedAxis.X, false)
+        exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, true)
     }
 
     override fun onCreateView(
@@ -55,7 +57,6 @@ class SettingsSearchFragment : Fragment() {
 
     override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
         super.onViewCreated(view, savedInstanceState)
-        settingsViewModel.setIsUsingSearch(true)
 
         if (savedInstanceState != null) {
             binding.searchText.setText(savedInstanceState.getString(SEARCH_TEXT))
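Note: the search fragment now owns all four of its transitions, which is what allowed the isUsingSearch plumbing in SettingsViewModel and SettingsFragment to be deleted. In MaterialSharedAxis the boolean argument is the direction, true animating forward along the axis and false backward, so enter/exit and return/reenter form mirrored pairs:

    // X-axis shared transitions: forward into search, backward out of it.
    enterTransition = MaterialSharedAxis(MaterialSharedAxis.X, /* forward = */ true)
    returnTransition = MaterialSharedAxis(MaterialSharedAxis.X, /* forward = */ false)
    reenterTransition = MaterialSharedAxis(MaterialSharedAxis.X, /* forward = */ false)
    exitTransition = MaterialSharedAxis(MaterialSharedAxis.X, /* forward = */ true)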
diff --git a/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt b/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt
index 53fa7a8de..6f947674e 100644
--- a/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt
+++ b/src/android/app/src/main/java/org/yuzu/yuzu_emu/model/SettingsViewModel.kt
@@ -29,9 +29,6 @@ class SettingsViewModel : ViewModel() {
     val shouldReloadSettingsList: StateFlow<Boolean> get() = _shouldReloadSettingsList
     private val _shouldReloadSettingsList = MutableStateFlow(false)
 
-    val isUsingSearch: StateFlow<Boolean> get() = _isUsingSearch
-    private val _isUsingSearch = MutableStateFlow(false)
-
     val sliderProgress: StateFlow<Int> get() = _sliderProgress
     private val _sliderProgress = MutableStateFlow(-1)
 
@@ -57,10 +54,6 @@ class SettingsViewModel : ViewModel() {
         _shouldReloadSettingsList.value = value
     }
 
-    fun setIsUsingSearch(value: Boolean) {
-        _isUsingSearch.value = value
-    }
-
     fun setSliderTextValue(value: Float, units: String) {
         _sliderProgress.value = value.toInt()
         _sliderTextValue.value = String.format(
diff --git a/src/android/app/src/main/jni/emu_window/emu_window.cpp b/src/android/app/src/main/jni/emu_window/emu_window.cpp
index a7e414b81..c4f631924 100644
--- a/src/android/app/src/main/jni/emu_window/emu_window.cpp
+++ b/src/android/app/src/main/jni/emu_window/emu_window.cpp
@@ -9,6 +9,7 @@
 #include "input_common/drivers/virtual_gamepad.h"
 #include "input_common/main.h"
 #include "jni/emu_window/emu_window.h"
+#include "jni/native.h"
 
 void EmuWindow_Android::OnSurfaceChanged(ANativeWindow* surface) {
     m_window_width = ANativeWindow_getWidth(surface);
@@ -57,6 +58,13 @@ void EmuWindow_Android::OnRemoveNfcTag() {
     m_input_subsystem->GetVirtualAmiibo()->CloseAmiibo();
 }
 
+void EmuWindow_Android::OnFrameDisplayed() {
+    if (!m_first_frame) {
+        EmulationSession::GetInstance().OnEmulationStarted();
+        m_first_frame = true;
+    }
+}
+
 EmuWindow_Android::EmuWindow_Android(InputCommon::InputSubsystem* input_subsystem,
                                      ANativeWindow* surface,
                                      std::shared_ptr<Common::DynamicLibrary> driver_library)
diff --git a/src/android/app/src/main/jni/emu_window/emu_window.h b/src/android/app/src/main/jni/emu_window/emu_window.h
index b38087f73..a34a0e479 100644
--- a/src/android/app/src/main/jni/emu_window/emu_window.h
+++ b/src/android/app/src/main/jni/emu_window/emu_window.h
@@ -45,7 +45,7 @@ public:
                     float gyro_z, float accel_x, float accel_y, float accel_z);
     void OnReadNfcTag(std::span<u8> data);
     void OnRemoveNfcTag();
-    void OnFrameDisplayed() override {}
+    void OnFrameDisplayed() override;
 
     std::unique_ptr<Core::Frontend::GraphicsContext> CreateSharedContext() const override {
         return {std::make_unique<GraphicsContext_Android>(m_driver_library)};
@@ -61,4 +61,6 @@ private:
     float m_window_height{};
 
     std::shared_ptr<Common::DynamicLibrary> m_driver_library;
+
+    bool m_first_frame = false;
 };
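Note: together with the emu_window.cpp change above, OnEmulationStarted() now fires once, on the first frame actually presented, rather than when RunEmulation() begins, so the frontend's emulationStarted flag flips only after something is visible. A sketch of a Kotlin-side consumer of that signal (the loading view name is illustrative; emulationStarted is the flag polled in EmulationFragment above):

    // Illustrative consumer: hide a loading view once the first frame lands.
    viewLifecycleOwner.lifecycleScope.launch {
        emulationViewModel.emulationStarted.collect { started ->
            if (started) binding.loadingIndicator.visibility = View.GONE
        }
    }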
diff --git a/src/android/app/src/main/jni/native.cpp b/src/android/app/src/main/jni/native.cpp
index 46438906e..64663b084 100644
--- a/src/android/app/src/main/jni/native.cpp
+++ b/src/android/app/src/main/jni/native.cpp
@@ -199,8 +199,8 @@ bool EmulationSession::IsPaused() const {
     return m_is_running && m_is_paused;
 }
 
-const Core::PerfStatsResults& EmulationSession::PerfStats() const {
-    std::scoped_lock m_perf_stats_lock(m_perf_stats_mutex);
+const Core::PerfStatsResults& EmulationSession::PerfStats() {
+    m_perf_stats = m_system.GetAndResetPerfStats();
     return m_perf_stats;
 }
 
@@ -372,8 +372,6 @@ void EmulationSession::RunEmulation() {
         m_system.InitializeDebugger();
     }
 
-    OnEmulationStarted();
-
     while (true) {
         {
             [[maybe_unused]] std::unique_lock lock(m_mutex);
@@ -383,11 +381,6 @@ void EmulationSession::RunEmulation() {
                 break;
             }
         }
-        {
-            // Refresh performance stats.
-            std::scoped_lock m_perf_stats_lock(m_perf_stats_mutex);
-            m_perf_stats = m_system.GetAndResetPerfStats();
-        }
     }
 }
 
diff --git a/src/android/app/src/main/jni/native.h b/src/android/app/src/main/jni/native.h
index 3b9596459..78ef96802 100644
--- a/src/android/app/src/main/jni/native.h
+++ b/src/android/app/src/main/jni/native.h
@@ -41,7 +41,7 @@ public:
     void RunEmulation();
     void ShutdownEmulation();
 
-    const Core::PerfStatsResults& PerfStats() const;
+    const Core::PerfStatsResults& PerfStats();
     void ConfigureFilesystemProvider(const std::string& filepath);
     void InitializeSystem(bool reload);
     Core::SystemResultStatus InitializeEmulation(const std::string& filepath);
@@ -52,9 +52,10 @@ public:
     void OnGamepadDisconnectEvent([[maybe_unused]] int index);
     SoftwareKeyboard::AndroidKeyboard* SoftwareKeyboard();
 
+    static void OnEmulationStarted();
+
 private:
     static void LoadDiskCacheProgress(VideoCore::LoadCallbackStage stage, int progress, int max);
-    static void OnEmulationStarted();
     static void OnEmulationStopped(Core::SystemResultStatus result);
 
 private:
@@ -80,6 +81,5 @@ private:
 
     // Synchronization
     std::condition_variable_any m_cv;
-    mutable std::mutex m_perf_stats_mutex;
     mutable std::mutex m_mutex;
 };
diff --git a/src/android/app/src/main/res/drawable/ic_audio.xml b/src/android/app/src/main/res/drawable/ic_audio.xml
new file mode 100644
index 000000000..e306c3b0c
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_audio.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+    android:width="24dp"
+    android:height="24dp"
+    android:viewportHeight="24"
+    android:viewportWidth="24">
+    <path
+        android:fillColor="?attr/colorControlNormal"
+        android:pathData="M3,9v6h4l5,5L12,4L7,9L3,9zM16.5,12c0,-1.77 -1.02,-3.29 -2.5,-4.03v8.05c1.48,-0.73 2.5,-2.25 2.5,-4.02zM14,3.23v2.06c2.89,0.86 5,3.54 5,6.71s-2.11,5.85 -5,6.71v2.06c4.01,-0.91 7,-4.49 7,-8.77s-2.99,-7.86 -7,-8.77z" />
+</vector>
diff --git a/src/android/app/src/main/res/drawable/ic_code.xml b/src/android/app/src/main/res/drawable/ic_code.xml
new file mode 100644
index 000000000..26f83b39b
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_code.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+    android:width="24dp"
+    android:height="24dp"
+    android:viewportWidth="960"
+    android:viewportHeight="960">
+    <path
+        android:fillColor="?attr/colorControlNormal"
+        android:pathData="M320,720 L80,480l240,-240 57,57 -184,184 183,183 -56,56ZM640,720 L583,663 767,479 584,296 640,240 880,480 640,720Z"/>
+</vector>
diff --git a/src/android/app/src/main/res/drawable/ic_graphics.xml b/src/android/app/src/main/res/drawable/ic_graphics.xml
new file mode 100644
index 000000000..2fdb5a4d6
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_graphics.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+    android:width="24dp"
+    android:height="24dp"
+    android:viewportWidth="960"
+    android:viewportHeight="960">
+    <path
+        android:fillColor="?attr/colorControlNormal"
+        android:pathData="M160,840q-33,0 -56.5,-23.5T80,760v-560q0,-33 23.5,-56.5T160,120h560q33,0 56.5,23.5T800,200v80h80v80h-80v80h80v80h-80v80h80v80h-80v80q0,33 -23.5,56.5T720,840L160,840ZM160,760h560v-560L160,200v560ZM240,680h200v-160L240,520v160ZM480,400h160v-120L480,280v120ZM240,480h200v-200L240,280v200ZM480,680h160v-240L480,440v240ZM160,200v560,-560Z"/>
+</vector>
diff --git a/src/android/app/src/main/res/drawable/ic_system_settings.xml b/src/android/app/src/main/res/drawable/ic_system_settings.xml
new file mode 100644
index 000000000..7701a2bab
--- /dev/null
+++ b/src/android/app/src/main/res/drawable/ic_system_settings.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+    android:width="24dp"
+    android:height="24dp"
+    android:viewportWidth="960"
+    android:viewportHeight="960">
+    <path
+        android:fillColor="?attr/colorControlNormal"
+        android:pathData="M320,960q-17,0 -28.5,-11.5T280,920q0,-17 11.5,-28.5T320,880q17,0 28.5,11.5T360,920q0,17 -11.5,28.5T320,960ZM480,960q-17,0 -28.5,-11.5T440,920q0,-17 11.5,-28.5T480,880q17,0 28.5,11.5T520,920q0,17 -11.5,28.5T480,960ZM640,960q-17,0 -28.5,-11.5T600,920q0,-17 11.5,-28.5T640,880q17,0 28.5,11.5T680,920q0,17 -11.5,28.5T640,960ZM320,800q-33,0 -56.5,-23.5T240,720v-640q0,-33 23.5,-56.5T320,0h320q33,0 56.5,23.5T720,80v640q0,33 -23.5,56.5T640,800L320,800ZM320,720h320v-40L320,680v40ZM320,600h320v-400L320,200v400ZM320,120h320v-40L320,80v40ZM320,120v-40,40ZM320,720v-40,40Z"/>
+</vector>
diff --git a/src/android/app/src/main/res/layout-w600dp/fragment_about.xml b/src/android/app/src/main/res/layout-w600dp/fragment_about.xml
new file mode 100644
index 000000000..a26ffbc73
--- /dev/null
+++ b/src/android/app/src/main/res/layout-w600dp/fragment_about.xml
@@ -0,0 +1,233 @@
+<?xml version="1.0" encoding="utf-8"?>
+<androidx.coordinatorlayout.widget.CoordinatorLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    xmlns:app="http://schemas.android.com/apk/res-auto"
+    xmlns:tools="http://schemas.android.com/tools"
+    android:id="@+id/coordinator_about"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent"
+    android:background="?attr/colorSurface">
+
+    <com.google.android.material.appbar.AppBarLayout
+        android:id="@+id/appbar_about"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:fitsSystemWindows="true">
+
+        <com.google.android.material.appbar.MaterialToolbar
+            android:id="@+id/toolbar_about"
+            android:layout_width="match_parent"
+            android:layout_height="?attr/actionBarSize"
+            app:navigationIcon="@drawable/ic_back"
+            app:title="@string/about" />
+
+    </com.google.android.material.appbar.AppBarLayout>
+
+    <androidx.core.widget.NestedScrollView
+        android:id="@+id/scroll_about"
+        android:layout_width="match_parent"
+        android:layout_height="match_parent"
+        android:fadeScrollbars="false"
+        android:scrollbars="vertical"
+        app:layout_behavior="@string/appbar_scrolling_view_behavior">
+
+        <LinearLayout
+            android:id="@+id/content_about"
+            android:layout_width="match_parent"
+            android:layout_height="match_parent"
+            android:orientation="horizontal">
+
+            <ImageView
+                android:id="@+id/image_logo"
+                android:layout_width="200dp"
+                android:layout_height="200dp"
+                android:layout_gravity="center_horizontal"
+                android:padding="20dp"
+                android:src="@drawable/ic_yuzu_title" />
+
+            <LinearLayout
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:orientation="vertical">
+
+                <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:orientation="vertical"
+                    android:paddingHorizontal="16dp"
+                    android:paddingVertical="16dp">
+
+                    <com.google.android.material.textview.MaterialTextView
+                        style="@style/TextAppearance.Material3.TitleMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:text="@string/about"
+                        android:textAlignment="viewStart" />
+
+                    <com.google.android.material.textview.MaterialTextView
+                        style="@style/TextAppearance.Material3.BodyMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:layout_marginTop="6dp"
+                        android:text="@string/about_app_description"
+                        android:textAlignment="viewStart" />
+
+                </LinearLayout>
+
+                <com.google.android.material.divider.MaterialDivider
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:layout_marginHorizontal="20dp" />
+
+                <LinearLayout
+                    android:id="@+id/button_contributors"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:background="?attr/selectableItemBackground"
+                    android:orientation="vertical"
+                    android:paddingHorizontal="16dp"
+                    android:paddingVertical="16dp">
+
+                    <com.google.android.material.textview.MaterialTextView
+                        style="@style/TextAppearance.Material3.TitleMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:text="@string/contributors"
+                        android:textAlignment="viewStart" />
+
+                    <com.google.android.material.textview.MaterialTextView
+                        style="@style/TextAppearance.Material3.BodyMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:layout_marginTop="6dp"
+                        android:text="@string/contributors_description"
+                        android:textAlignment="viewStart" />
+
+                </LinearLayout>
+
+                <com.google.android.material.divider.MaterialDivider
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:layout_marginHorizontal="20dp" />
+
+                <LinearLayout
+                    android:id="@+id/button_licenses"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:background="?attr/selectableItemBackground"
+                    android:orientation="vertical"
+                    android:paddingHorizontal="16dp"
+                    android:paddingVertical="16dp">
+
+                    <com.google.android.material.textview.MaterialTextView
+                        style="@style/TextAppearance.Material3.TitleMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:text="@string/licenses"
+                        android:textAlignment="viewStart" />
+
+                    <com.google.android.material.textview.MaterialTextView
+                        style="@style/TextAppearance.Material3.BodyMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:layout_marginTop="6dp"
+                        android:text="@string/licenses_description"
+                        android:textAlignment="viewStart" />
+
+                </LinearLayout>
+
+                <com.google.android.material.divider.MaterialDivider
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:layout_marginHorizontal="20dp" />
+
+                <LinearLayout
+                    android:id="@+id/button_build_hash"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:background="?attr/selectableItemBackground"
+                    android:orientation="vertical"
+                    android:paddingHorizontal="16dp"
+                    android:paddingVertical="16dp">
+
+                    <com.google.android.material.textview.MaterialTextView
+                        style="@style/TextAppearance.Material3.TitleMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:text="@string/build"
+                        android:textAlignment="viewStart" />
+
+                    <com.google.android.material.textview.MaterialTextView
+                        android:id="@+id/text_build_hash"
+                        style="@style/TextAppearance.Material3.BodyMedium"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_marginHorizontal="24dp"
+                        android:layout_marginTop="6dp"
+                        android:textAlignment="viewStart"
+                        tools:text="abc123" />
+
+                </LinearLayout>
+
+                <com.google.android.material.divider.MaterialDivider
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:layout_marginHorizontal="20dp" />
+
+                <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:layout_marginHorizontal="40dp"
+                    android:layout_marginTop="12dp"
+                    android:layout_marginBottom="16dp"
+                    android:gravity="center_horizontal"
+                    android:orientation="horizontal">
+
+                    <Button
+                        android:id="@+id/button_discord"
+                        style="?attr/materialIconButtonStyle"
+                        android:layout_width="0dp"
+                        android:layout_height="wrap_content"
+                        android:layout_weight="1"
+                        app:icon="@drawable/ic_discord"
+                        app:iconGravity="textEnd"
+                        app:iconSize="24dp"
+                        app:iconTint="?attr/colorOnSurface" />
+
+                    <Button
+                        android:id="@+id/button_website"
+                        style="?attr/materialIconButtonStyle"
+                        android:layout_width="0dp"
+                        android:layout_height="wrap_content"
+                        android:layout_weight="1"
+                        app:icon="@drawable/ic_website"
+                        app:iconGravity="textEnd"
+                        app:iconSize="24dp"
+                        app:iconTint="?attr/colorOnSurface" />
+
+                    <Button
+                        android:id="@+id/button_github"
+                        style="?attr/materialIconButtonStyle"
+                        android:layout_width="0dp"
+                        android:layout_height="wrap_content"
+                        android:layout_weight="1"
+                        app:icon="@drawable/ic_github"
+                        app:iconGravity="textEnd"
+                        app:iconSize="24dp"
+                        app:iconTint="?attr/colorOnSurface" />
+
+                </LinearLayout>
+
+            </LinearLayout>
+
+        </LinearLayout>
+
+    </androidx.core.widget.NestedScrollView>
+
+</androidx.coordinatorlayout.widget.CoordinatorLayout>
diff --git a/src/android/app/src/main/res/layout/card_home_option.xml b/src/android/app/src/main/res/layout/card_home_option.xml
index 6e8a232f9..cb667c928 100644
--- a/src/android/app/src/main/res/layout/card_home_option.xml
+++ b/src/android/app/src/main/res/layout/card_home_option.xml
@@ -6,8 +6,8 @@
     android:id="@+id/option_card"
     android:layout_width="match_parent"
    android:layout_height="wrap_content"
-    android:layout_marginVertical="12dp"
-    android:layout_marginHorizontal="16dp"
+    android:layout_marginBottom="24dp"
+    android:layout_marginHorizontal="12dp"
    android:background="?attr/selectableItemBackground"
    android:backgroundTint="?attr/colorSurfaceVariant"
    android:clickable="true"
diff --git a/src/android/app/src/main/res/layout/fragment_about.xml b/src/android/app/src/main/res/layout/fragment_about.xml
index 3e1d98451..a24f5230e 100644
--- a/src/android/app/src/main/res/layout/fragment_about.xml
+++ b/src/android/app/src/main/res/layout/fragment_about.xml
@@ -38,17 +38,17 @@
 
     <ImageView
         android:id="@+id/image_logo"
-        android:layout_width="250dp"
-        android:layout_height="250dp"
-        android:layout_marginTop="20dp"
+        android:layout_width="150dp"
+        android:layout_height="150dp"
+        android:layout_marginTop="24dp"
+        android:layout_marginBottom="28dp"
         android:layout_gravity="center_horizontal"
         android:src="@drawable/ic_yuzu_title" />
 
     <com.google.android.material.divider.MaterialDivider
         android:layout_width="match_parent"
         android:layout_height="wrap_content"
-        android:layout_marginHorizontal="20dp"
-        android:layout_marginTop="28dp" />
+        android:layout_marginHorizontal="20dp" />
 
     <LinearLayout
         android:layout_width="match_parent"
diff --git a/src/android/app/src/main/res/layout/fragment_emulation.xml b/src/android/app/src/main/res/layout/fragment_emulation.xml
index 750ce094a..cd6360b45 100644
--- a/src/android/app/src/main/res/layout/fragment_emulation.xml
+++ b/src/android/app/src/main/res/layout/fragment_emulation.xml
@@ -134,16 +134,18 @@
     <FrameLayout
         android:id="@+id/overlay_container"
         android:layout_width="match_parent"
-        android:layout_height="match_parent">
+        android:layout_height="match_parent"
+        android:fitsSystemWindows="true">
 
-        <TextView
+        <com.google.android.material.textview.MaterialTextView
             android:id="@+id/show_fps_text"
+            style="@style/TextAppearance.Material3.BodyMedium"
             android:layout_width="wrap_content"
             android:layout_height="wrap_content"
             android:layout_gravity="left"
             android:clickable="false"
             android:focusable="false"
-            android:shadowColor="@android:color/black"
+            android:paddingHorizontal="20dp"
             android:textColor="@android:color/white"
             android:textSize="12sp"
             tools:ignore="RtlHardcoded" />
diff --git a/src/android/app/src/main/res/layout/fragment_home_settings.xml b/src/android/app/src/main/res/layout/fragment_home_settings.xml index 1cb421dcb..d84093ba3 100644 --- a/src/android/app/src/main/res/layout/fragment_home_settings.xml +++ b/src/android/app/src/main/res/layout/fragment_home_settings.xml | |||
| @@ -14,13 +14,14 @@ | |||
| 14 | android:layout_width="match_parent" | 14 | android:layout_width="match_parent" |
| 15 | android:layout_height="match_parent" | 15 | android:layout_height="match_parent" |
| 16 | android:orientation="vertical" | 16 | android:orientation="vertical" |
| 17 | android:background="?attr/colorSurface"> | 17 | android:background="?attr/colorSurface" |
| 18 | android:paddingHorizontal="8dp"> | ||
| 18 | 19 | ||
| 19 | <ImageView | 20 | <ImageView |
| 20 | android:id="@+id/logo_image" | 21 | android:id="@+id/logo_image" |
| 21 | android:layout_width="128dp" | 22 | android:layout_width="96dp" |
| 22 | android:layout_height="128dp" | 23 | android:layout_height="96dp" |
| 23 | android:layout_margin="64dp" | 24 | android:layout_marginVertical="32dp" |
| 24 | android:layout_gravity="center_horizontal" | 25 | android:layout_gravity="center_horizontal" |
| 25 | android:src="@drawable/ic_yuzu_full" /> | 26 | android:src="@drawable/ic_yuzu_full" /> |
| 26 | 27 | ||
diff --git a/src/android/app/src/main/res/layout/list_item_setting.xml b/src/android/app/src/main/res/layout/list_item_setting.xml index f1037a740..544280e75 100644 --- a/src/android/app/src/main/res/layout/list_item_setting.xml +++ b/src/android/app/src/main/res/layout/list_item_setting.xml | |||
| @@ -10,41 +10,59 @@ | |||
| 10 | android:focusable="true" | 10 | android:focusable="true" |
| 11 | android:gravity="center_vertical" | 11 | android:gravity="center_vertical" |
| 12 | android:minHeight="72dp" | 12 | android:minHeight="72dp" |
| 13 | android:padding="@dimen/spacing_large"> | 13 | android:padding="16dp"> |
| 14 | 14 | ||
| 15 | <LinearLayout | 15 | <LinearLayout |
| 16 | android:layout_width="match_parent" | 16 | android:layout_width="match_parent" |
| 17 | android:layout_height="wrap_content" | 17 | android:layout_height="wrap_content" |
| 18 | android:orientation="vertical"> | 18 | android:orientation="horizontal"> |
| 19 | 19 | ||
| 20 | <com.google.android.material.textview.MaterialTextView | 20 | <ImageView |
| 21 | android:id="@+id/text_setting_name" | 21 | android:id="@+id/icon" |
| 22 | style="@style/TextAppearance.Material3.HeadlineMedium" | 22 | android:layout_width="24dp" |
| 23 | android:layout_width="match_parent" | 23 | android:layout_height="24dp" |
| 24 | android:layout_height="wrap_content" | 24 | android:layout_marginStart="8dp" |
| 25 | android:textAlignment="viewStart" | 25 | android:layout_marginEnd="24dp" |
| 26 | android:textSize="16sp" | 26 | android:layout_gravity="center_vertical" |
| 27 | app:lineHeight="22dp" | 27 | android:visibility="gone" |
| 28 | tools:text="Setting Name" /> | 28 | app:tint="?attr/colorOnSurface" /> |
| 29 | |||
| 30 | <com.google.android.material.textview.MaterialTextView | ||
| 31 | android:id="@+id/text_setting_description" | ||
| 32 | style="@style/TextAppearance.Material3.BodySmall" | ||
| 33 | android:layout_width="match_parent" | ||
| 34 | android:layout_height="wrap_content" | ||
| 35 | android:layout_marginTop="@dimen/spacing_small" | ||
| 36 | android:textAlignment="viewStart" | ||
| 37 | tools:text="@string/app_disclaimer" /> | ||
| 38 | 29 | ||
| 39 | <com.google.android.material.textview.MaterialTextView | 30 | <LinearLayout |
| 40 | android:id="@+id/text_setting_value" | ||
| 41 | style="@style/TextAppearance.Material3.LabelMedium" | ||
| 42 | android:layout_width="match_parent" | 31 | android:layout_width="match_parent" |
| 43 | android:layout_height="wrap_content" | 32 | android:layout_height="wrap_content" |
| 44 | android:layout_marginTop="@dimen/spacing_small" | 33 | android:orientation="vertical"> |
| 45 | android:textAlignment="viewStart" | 34 | |
| 46 | android:textStyle="bold" | 35 | <com.google.android.material.textview.MaterialTextView |
| 47 | tools:text="1x" /> | 36 | android:id="@+id/text_setting_name" |
| 37 | style="@style/TextAppearance.Material3.HeadlineMedium" | ||
| 38 | android:layout_width="match_parent" | ||
| 39 | android:layout_height="wrap_content" | ||
| 40 | android:textAlignment="viewStart" | ||
| 41 | android:textSize="17sp" | ||
| 42 | app:lineHeight="22dp" | ||
| 43 | tools:text="Setting Name" /> | ||
| 44 | |||
| 45 | <com.google.android.material.textview.MaterialTextView | ||
| 46 | android:id="@+id/text_setting_description" | ||
| 47 | style="@style/TextAppearance.Material3.BodySmall" | ||
| 48 | android:layout_width="match_parent" | ||
| 49 | android:layout_height="wrap_content" | ||
| 50 | android:layout_marginTop="@dimen/spacing_small" | ||
| 51 | android:textAlignment="viewStart" | ||
| 52 | tools:text="@string/app_disclaimer" /> | ||
| 53 | |||
| 54 | <com.google.android.material.textview.MaterialTextView | ||
| 55 | android:id="@+id/text_setting_value" | ||
| 56 | style="@style/TextAppearance.Material3.LabelMedium" | ||
| 57 | android:layout_width="match_parent" | ||
| 58 | android:layout_height="wrap_content" | ||
| 59 | android:layout_marginTop="@dimen/spacing_small" | ||
| 60 | android:textAlignment="viewStart" | ||
| 61 | android:textStyle="bold" | ||
| 62 | android:textSize="13sp" | ||
| 63 | tools:text="1x" /> | ||
| 64 | |||
| 65 | </LinearLayout> | ||
| 48 | 66 | ||
| 49 | </LinearLayout> | 67 | </LinearLayout> |
| 50 | 68 | ||
diff --git a/src/android/app/src/main/res/layout/list_item_setting_switch.xml b/src/android/app/src/main/res/layout/list_item_setting_switch.xml index a5767adee..a8f5aff78 100644 --- a/src/android/app/src/main/res/layout/list_item_setting_switch.xml +++ b/src/android/app/src/main/res/layout/list_item_setting_switch.xml | |||
| @@ -8,9 +8,7 @@ | |||
| 8 | android:clickable="true" | 8 | android:clickable="true" |
| 9 | android:focusable="true" | 9 | android:focusable="true" |
| 10 | android:minHeight="72dp" | 10 | android:minHeight="72dp" |
| 11 | android:paddingVertical="@dimen/spacing_large" | 11 | android:padding="16dp"> |
| 12 | android:paddingStart="@dimen/spacing_large" | ||
| 13 | android:paddingEnd="24dp"> | ||
| 14 | 12 | ||
| 15 | <com.google.android.material.materialswitch.MaterialSwitch | 13 | <com.google.android.material.materialswitch.MaterialSwitch |
| 16 | android:id="@+id/switch_widget" | 14 | android:id="@+id/switch_widget" |
| @@ -24,7 +22,7 @@ | |||
| 24 | android:layout_height="wrap_content" | 22 | android:layout_height="wrap_content" |
| 25 | android:layout_alignParentTop="true" | 23 | android:layout_alignParentTop="true" |
| 26 | android:layout_centerVertical="true" | 24 | android:layout_centerVertical="true" |
| 27 | android:layout_marginEnd="@dimen/spacing_large" | 25 | android:layout_marginEnd="24dp" |
| 28 | android:layout_toStartOf="@+id/switch_widget" | 26 | android:layout_toStartOf="@+id/switch_widget" |
| 29 | android:gravity="center_vertical" | 27 | android:gravity="center_vertical" |
| 30 | android:orientation="vertical"> | 28 | android:orientation="vertical"> |
| @@ -35,7 +33,7 @@ | |||
| 35 | android:layout_width="wrap_content" | 33 | android:layout_width="wrap_content" |
| 36 | android:layout_height="wrap_content" | 34 | android:layout_height="wrap_content" |
| 37 | android:textAlignment="viewStart" | 35 | android:textAlignment="viewStart" |
| 38 | android:textSize="16sp" | 36 | android:textSize="17sp" |
| 39 | app:lineHeight="28dp" | 37 | app:lineHeight="28dp" |
| 40 | tools:text="@string/frame_limit_enable" /> | 38 | tools:text="@string/frame_limit_enable" /> |
| 41 | 39 | ||
diff --git a/src/android/app/src/main/res/layout/list_item_settings_header.xml b/src/android/app/src/main/res/layout/list_item_settings_header.xml index cf85bc0da..21276b19e 100644 --- a/src/android/app/src/main/res/layout/list_item_settings_header.xml +++ b/src/android/app/src/main/res/layout/list_item_settings_header.xml | |||
| @@ -7,7 +7,8 @@ | |||
| 7 | android:layout_height="wrap_content" | 7 | android:layout_height="wrap_content" |
| 8 | android:layout_gravity="start|center_vertical" | 8 | android:layout_gravity="start|center_vertical" |
| 9 | android:paddingHorizontal="@dimen/spacing_large" | 9 | android:paddingHorizontal="@dimen/spacing_large" |
| 10 | android:paddingVertical="16dp" | 10 | android:paddingTop="16dp" |
| 11 | android:paddingBottom="8dp" | ||
| 11 | android:textAlignment="viewStart" | 12 | android:textAlignment="viewStart" |
| 12 | android:textColor="?attr/colorPrimary" | 13 | android:textColor="?attr/colorPrimary" |
| 13 | android:textStyle="bold" | 14 | android:textStyle="bold" |
diff --git a/src/android/app/src/main/res/values/arrays.xml b/src/android/app/src/main/res/values/arrays.xml index dc10159c9..51bcc49a3 100644 --- a/src/android/app/src/main/res/values/arrays.xml +++ b/src/android/app/src/main/res/values/arrays.xml | |||
| @@ -2,7 +2,6 @@ | |||
| 2 | <resources> | 2 | <resources> |
| 3 | 3 | ||
| 4 | <string-array name="regionNames"> | 4 | <string-array name="regionNames"> |
| 5 | <item>@string/auto</item> | ||
| 6 | <item>@string/region_australia</item> | 5 | <item>@string/region_australia</item> |
| 7 | <item>@string/region_china</item> | 6 | <item>@string/region_china</item> |
| 8 | <item>@string/region_europe</item> | 7 | <item>@string/region_europe</item> |
| @@ -13,7 +12,6 @@ | |||
| 13 | </string-array> | 12 | </string-array> |
| 14 | 13 | ||
| 15 | <integer-array name="regionValues"> | 14 | <integer-array name="regionValues"> |
| 16 | <item>-1</item> | ||
| 17 | <item>3</item> | 15 | <item>3</item> |
| 18 | <item>4</item> | 16 | <item>4</item> |
| 19 | <item>2</item> | 17 | <item>2</item> |
diff --git a/src/android/app/src/main/res/values/strings.xml b/src/android/app/src/main/res/values/strings.xml index c551a6106..98c3f20f8 100644 --- a/src/android/app/src/main/res/values/strings.xml +++ b/src/android/app/src/main/res/values/strings.xml | |||
| @@ -240,6 +240,7 @@ | |||
| 240 | <string name="shutting_down">Shutting down…</string> | 240 | <string name="shutting_down">Shutting down…</string> |
| 241 | <string name="reset_setting_confirmation">Do you want to reset this setting back to its default value?</string> | 241 | <string name="reset_setting_confirmation">Do you want to reset this setting back to its default value?</string> |
| 242 | <string name="reset_to_default">Reset to default</string> | 242 | <string name="reset_to_default">Reset to default</string> |
| 243 | <string name="reset_to_default_description">Resets all advanced settings</string> | ||
| 243 | <string name="reset_all_settings">Reset all settings?</string> | 244 | <string name="reset_all_settings">Reset all settings?</string> |
| 244 | <string name="reset_all_settings_description">All advanced settings will be reset to their default configuration. This cannot be undone.</string> | 245 | <string name="reset_all_settings_description">All advanced settings will be reset to their default configuration. This cannot be undone.</string> |
| 245 | <string name="settings_reset">Settings reset</string> | 246 | <string name="settings_reset">Settings reset</string> |
| @@ -271,10 +272,14 @@ | |||
| 271 | <string name="preferences_settings">Settings</string> | 272 | <string name="preferences_settings">Settings</string> |
| 272 | <string name="preferences_general">General</string> | 273 | <string name="preferences_general">General</string> |
| 273 | <string name="preferences_system">System</string> | 274 | <string name="preferences_system">System</string> |
| 275 | <string name="preferences_system_description">Docked mode, region, language</string> | ||
| 274 | <string name="preferences_graphics">Graphics</string> | 276 | <string name="preferences_graphics">Graphics</string> |
| 277 | <string name="preferences_graphics_description">Accuracy level, resolution, shader cache</string> | ||
| 275 | <string name="preferences_audio">Audio</string> | 278 | <string name="preferences_audio">Audio</string> |
| 279 | <string name="preferences_audio_description">Output engine, volume</string> | ||
| 276 | <string name="preferences_theme">Theme and color</string> | 280 | <string name="preferences_theme">Theme and color</string> |
| 277 | <string name="preferences_debug">Debug</string> | 281 | <string name="preferences_debug">Debug</string> |
| 282 | <string name="preferences_debug_description">CPU/GPU debugging, graphics API, fastmem</string> | ||
| 278 | 283 | ||
| 279 | <!-- ROM loading errors --> | 284 | <!-- ROM loading errors --> |
| 280 | <string name="loader_error_encrypted">Your ROM is encrypted</string> | 285 | <string name="loader_error_encrypted">Your ROM is encrypted</string> |
diff --git a/src/audio_core/adsp/apps/opus/opus_decode_object.cpp b/src/audio_core/adsp/apps/opus/opus_decode_object.cpp index 2c16d3769..e2b9eb566 100644 --- a/src/audio_core/adsp/apps/opus/opus_decode_object.cpp +++ b/src/audio_core/adsp/apps/opus/opus_decode_object.cpp | |||
| @@ -1,107 +1,107 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "audio_core/adsp/apps/opus/opus_decode_object.h" | 4 | #include "audio_core/adsp/apps/opus/opus_decode_object.h" |
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | 6 | ||
| 7 | namespace AudioCore::ADSP::OpusDecoder { | 7 | namespace AudioCore::ADSP::OpusDecoder { |
| 8 | namespace { | 8 | namespace { |
| 9 | bool IsValidChannelCount(u32 channel_count) { | 9 | bool IsValidChannelCount(u32 channel_count) { |
| 10 | return channel_count == 1 || channel_count == 2; | 10 | return channel_count == 1 || channel_count == 2; |
| 11 | } | 11 | } |
| 12 | } // namespace | 12 | } // namespace |
| 13 | 13 | ||
| 14 | u32 OpusDecodeObject::GetWorkBufferSize(u32 channel_count) { | 14 | u32 OpusDecodeObject::GetWorkBufferSize(u32 channel_count) { |
| 15 | if (!IsValidChannelCount(channel_count)) { | 15 | if (!IsValidChannelCount(channel_count)) { |
| 16 | return 0; | 16 | return 0; |
| 17 | } | 17 | } |
| 18 | return static_cast<u32>(sizeof(OpusDecodeObject)) + opus_decoder_get_size(channel_count); | 18 | return static_cast<u32>(sizeof(OpusDecodeObject)) + opus_decoder_get_size(channel_count); |
| 19 | } | 19 | } |
| 20 | 20 | ||
| 21 | OpusDecodeObject& OpusDecodeObject::Initialize(u64 buffer, u64 buffer2) { | 21 | OpusDecodeObject& OpusDecodeObject::Initialize(u64 buffer, u64 buffer2) { |
| 22 | auto* new_decoder = reinterpret_cast<OpusDecodeObject*>(buffer); | 22 | auto* new_decoder = reinterpret_cast<OpusDecodeObject*>(buffer); |
| 23 | auto* comparison = reinterpret_cast<OpusDecodeObject*>(buffer2); | 23 | auto* comparison = reinterpret_cast<OpusDecodeObject*>(buffer2); |
| 24 | 24 | ||
| 25 | if (new_decoder->magic == DecodeObjectMagic) { | 25 | if (new_decoder->magic == DecodeObjectMagic) { |
| 26 | if (!new_decoder->initialized || | 26 | if (!new_decoder->initialized || |
| 27 | new_decoder->self == comparison) { | 27 | new_decoder->self == comparison) { |
| 28 | new_decoder->state_valid = true; | 28 | new_decoder->state_valid = true; |
| 29 | } | 29 | } |
| 30 | } else { | 30 | } else { |
| 31 | new_decoder->initialized = false; | 31 | new_decoder->initialized = false; |
| 32 | new_decoder->state_valid = true; | 32 | new_decoder->state_valid = true; |
| 33 | } | 33 | } |
| 34 | return *new_decoder; | 34 | return *new_decoder; |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | s32 OpusDecodeObject::InitializeDecoder(u32 sample_rate, u32 channel_count) { | 37 | s32 OpusDecodeObject::InitializeDecoder(u32 sample_rate, u32 channel_count) { |
| 38 | if (!state_valid) { | 38 | if (!state_valid) { |
| 39 | return OPUS_INVALID_STATE; | 39 | return OPUS_INVALID_STATE; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | if (initialized) { | 42 | if (initialized) { |
| 43 | return OPUS_OK; | 43 | return OPUS_OK; |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | // Unfortunately libopus does not expose the OpusDecoder struct publicly, so we can't include | 46 | // Unfortunately libopus does not expose the OpusDecoder struct publicly, so we can't include |
| 47 | // it in this class. Nintendo does not allocate memory here, which is why a workbuffer is | 47 | // it in this class. Nintendo does not allocate memory here, which is why a workbuffer is |
| 48 | // provided. | 48 | // provided. |
| 49 | // We could use _create and have libopus allocate it for us, but then we have to separately | 49 | // We could use _create and have libopus allocate it for us, but then we have to separately |
| 50 | // track which decoder is being used between this and multistream in order to call the correct | 50 | // track which decoder is being used between this and multistream in order to call the correct |
| 51 | // destroy from the host side. | 51 | // destroy from the host side. |
| 52 | // This is a bit awkward, but it is safe as these objects are only ever initialized inside the given | 52 | // This is a bit awkward, but it is safe as these objects are only ever initialized inside the given |
| 53 | // workbuffer, and GetWorkBufferSize guarantees there is enough space to follow. | 53 | // workbuffer, and GetWorkBufferSize guarantees there is enough space to follow. |
| 54 | decoder = (LibOpusDecoder*)(this + 1); | 54 | decoder = (LibOpusDecoder*)(this + 1); |
| 55 | s32 ret = opus_decoder_init(decoder, sample_rate, channel_count); | 55 | s32 ret = opus_decoder_init(decoder, sample_rate, channel_count); |
| 56 | if (ret == OPUS_OK) { | 56 | if (ret == OPUS_OK) { |
| 57 | magic = DecodeObjectMagic; | 57 | magic = DecodeObjectMagic; |
| 58 | initialized = true; | 58 | initialized = true; |
| 59 | state_valid = true; | 59 | state_valid = true; |
| 60 | self = this; | 60 | self = this; |
| 61 | final_range = 0; | 61 | final_range = 0; |
| 62 | } | 62 | } |
| 63 | return ret; | 63 | return ret; |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | s32 OpusDecodeObject::Shutdown() { | 66 | s32 OpusDecodeObject::Shutdown() { |
| 67 | if (!state_valid) { | 67 | if (!state_valid) { |
| 68 | return OPUS_INVALID_STATE; | 68 | return OPUS_INVALID_STATE; |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | if (initialized) { | 71 | if (initialized) { |
| 72 | magic = 0x0; | 72 | magic = 0x0; |
| 73 | initialized = false; | 73 | initialized = false; |
| 74 | state_valid = false; | 74 | state_valid = false; |
| 75 | self = nullptr; | 75 | self = nullptr; |
| 76 | final_range = 0; | 76 | final_range = 0; |
| 77 | decoder = nullptr; | 77 | decoder = nullptr; |
| 78 | } | 78 | } |
| 79 | return OPUS_OK; | 79 | return OPUS_OK; |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | s32 OpusDecodeObject::ResetDecoder() { | 82 | s32 OpusDecodeObject::ResetDecoder() { |
| 83 | return opus_decoder_ctl(decoder, OPUS_RESET_STATE); | 83 | return opus_decoder_ctl(decoder, OPUS_RESET_STATE); |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | s32 OpusDecodeObject::Decode(u32& out_sample_count, u64 output_data, u64 output_data_size, | 86 | s32 OpusDecodeObject::Decode(u32& out_sample_count, u64 output_data, u64 output_data_size, |
| 87 | u64 input_data, u64 input_data_size) { | 87 | u64 input_data, u64 input_data_size) { |
| 88 | ASSERT(initialized); | 88 | ASSERT(initialized); |
| 89 | out_sample_count = 0; | 89 | out_sample_count = 0; |
| 90 | 90 | ||
| 91 | if (!state_valid) { | 91 | if (!state_valid) { |
| 92 | return OPUS_INVALID_STATE; | 92 | return OPUS_INVALID_STATE; |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | auto ret_code_or_samples = opus_decode( | 95 | auto ret_code_or_samples = opus_decode( |
| 96 | decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size), | 96 | decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size), |
| 97 | reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0); | 97 | reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0); |
| 98 | 98 | ||
| 99 | if (ret_code_or_samples < OPUS_OK) { | 99 | if (ret_code_or_samples < OPUS_OK) { |
| 100 | return ret_code_or_samples; | 100 | return ret_code_or_samples; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | out_sample_count = ret_code_or_samples; | 103 | out_sample_count = ret_code_or_samples; |
| 104 | return opus_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range); | 104 | return opus_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | } // namespace AudioCore::ADSP::OpusDecoder | 107 | } // namespace AudioCore::ADSP::OpusDecoder |
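The workbuffer trick described in InitializeDecoder above is worth seeing in isolation: GetWorkBufferSize reserves room for the bookkeeping object plus the libopus decoder state, and the state is then placed immediately after the object inside the same caller-provided buffer. The sketch below reproduces that layout with plain libopus; DecodeHeader is a hypothetical stand-in for OpusDecodeObject's fields, not yuzu's actual type.

    // Minimal sketch of the single-allocation layout, assuming the buffer's base
    // alignment is suitable for the libopus state (the real code relies on the
    // same property of its workbuffer).
    #include <cstdint>
    #include <cstdio>
    #include <vector>
    #include <opus.h>

    struct DecodeHeader { // hypothetical stand-in for OpusDecodeObject's bookkeeping
        uint32_t magic;
        bool initialized;
    };

    int main() {
        const int channels = 2;
        // Mirrors GetWorkBufferSize(): object header + libopus decoder state.
        const size_t work_size = sizeof(DecodeHeader) + opus_decoder_get_size(channels);
        std::vector<unsigned char> workbuffer(work_size);

        // Mirrors "decoder = (LibOpusDecoder*)(this + 1)": the decoder state
        // lives immediately after the header in the same buffer.
        auto* header = reinterpret_cast<DecodeHeader*>(workbuffer.data());
        auto* decoder = reinterpret_cast<OpusDecoder*>(header + 1);

        if (opus_decoder_init(decoder, 48000, channels) == OPUS_OK) {
            header->initialized = true;
            std::printf("state uses %d of the %zu workbuffer bytes\n",
                        opus_decoder_get_size(channels), work_size);
        }
        return 0;
    }

Because libopus allocates nothing on the heap here, shutdown amounts to clearing the header fields, which is exactly what Shutdown() above does.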
diff --git a/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp b/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp index f6d362e68..7f1ed0450 100644 --- a/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp +++ b/src/audio_core/adsp/apps/opus/opus_multistream_decode_object.cpp | |||
| @@ -1,111 +1,111 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "audio_core/adsp/apps/opus/opus_multistream_decode_object.h" | 4 | #include "audio_core/adsp/apps/opus/opus_multistream_decode_object.h" |
| 5 | #include "common/assert.h" | 5 | #include "common/assert.h" |
| 6 | 6 | ||
| 7 | namespace AudioCore::ADSP::OpusDecoder { | 7 | namespace AudioCore::ADSP::OpusDecoder { |
| 8 | 8 | ||
| 9 | namespace { | 9 | namespace { |
| 10 | bool IsValidChannelCount(u32 channel_count) { | 10 | bool IsValidChannelCount(u32 channel_count) { |
| 11 | return channel_count == 1 || channel_count == 2; | 11 | return channel_count == 1 || channel_count == 2; |
| 12 | } | 12 | } |
| 13 | 13 | ||
| 14 | bool IsValidStreamCounts(u32 total_stream_count, u32 stereo_stream_count) { | 14 | bool IsValidStreamCounts(u32 total_stream_count, u32 stereo_stream_count) { |
| 15 | return total_stream_count > 0 && stereo_stream_count > 0 && | 15 | return total_stream_count > 0 && stereo_stream_count > 0 && |
| 16 | stereo_stream_count <= total_stream_count && IsValidChannelCount(total_stream_count); | 16 | stereo_stream_count <= total_stream_count && IsValidChannelCount(total_stream_count); |
| 17 | } | 17 | } |
| 18 | } // namespace | 18 | } // namespace |
| 19 | 19 | ||
| 20 | u32 OpusMultiStreamDecodeObject::GetWorkBufferSize(u32 total_stream_count, | 20 | u32 OpusMultiStreamDecodeObject::GetWorkBufferSize(u32 total_stream_count, |
| 21 | u32 stereo_stream_count) { | 21 | u32 stereo_stream_count) { |
| 22 | if (IsValidStreamCounts(total_stream_count, stereo_stream_count)) { | 22 | if (IsValidStreamCounts(total_stream_count, stereo_stream_count)) { |
| 23 | return static_cast<u32>(sizeof(OpusMultiStreamDecodeObject)) + | 23 | return static_cast<u32>(sizeof(OpusMultiStreamDecodeObject)) + |
| 24 | opus_multistream_decoder_get_size(total_stream_count, stereo_stream_count); | 24 | opus_multistream_decoder_get_size(total_stream_count, stereo_stream_count); |
| 25 | } | 25 | } |
| 26 | return 0; | 26 | return 0; |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | OpusMultiStreamDecodeObject& OpusMultiStreamDecodeObject::Initialize(u64 buffer, u64 buffer2) { | 29 | OpusMultiStreamDecodeObject& OpusMultiStreamDecodeObject::Initialize(u64 buffer, u64 buffer2) { |
| 30 | auto* new_decoder = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer); | 30 | auto* new_decoder = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer); |
| 31 | auto* comparison = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer2); | 31 | auto* comparison = reinterpret_cast<OpusMultiStreamDecodeObject*>(buffer2); |
| 32 | 32 | ||
| 33 | if (new_decoder->magic == DecodeMultiStreamObjectMagic) { | 33 | if (new_decoder->magic == DecodeMultiStreamObjectMagic) { |
| 34 | if (!new_decoder->initialized || | 34 | if (!new_decoder->initialized || |
| 35 | new_decoder->self == comparison) { | 35 | new_decoder->self == comparison) { |
| 36 | new_decoder->state_valid = true; | 36 | new_decoder->state_valid = true; |
| 37 | } | 37 | } |
| 38 | } else { | 38 | } else { |
| 39 | new_decoder->initialized = false; | 39 | new_decoder->initialized = false; |
| 40 | new_decoder->state_valid = true; | 40 | new_decoder->state_valid = true; |
| 41 | } | 41 | } |
| 42 | return *new_decoder; | 42 | return *new_decoder; |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | s32 OpusMultiStreamDecodeObject::InitializeDecoder(u32 sample_rate, u32 total_stream_count, | 45 | s32 OpusMultiStreamDecodeObject::InitializeDecoder(u32 sample_rate, u32 total_stream_count, |
| 46 | u32 channel_count, u32 stereo_stream_count, | 46 | u32 channel_count, u32 stereo_stream_count, |
| 47 | u8* mappings) { | 47 | u8* mappings) { |
| 48 | if (!state_valid) { | 48 | if (!state_valid) { |
| 49 | return OPUS_INVALID_STATE; | 49 | return OPUS_INVALID_STATE; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | if (initialized) { | 52 | if (initialized) { |
| 53 | return OPUS_OK; | 53 | return OPUS_OK; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | // See OpusDecodeObject::InitializeDecoder for an explanation of this | 56 | // See OpusDecodeObject::InitializeDecoder for an explanation of this |
| 57 | decoder = (LibOpusMSDecoder*)(this + 1); | 57 | decoder = (LibOpusMSDecoder*)(this + 1); |
| 58 | s32 ret = opus_multistream_decoder_init(decoder, sample_rate, channel_count, total_stream_count, | 58 | s32 ret = opus_multistream_decoder_init(decoder, sample_rate, channel_count, total_stream_count, |
| 59 | stereo_stream_count, mappings); | 59 | stereo_stream_count, mappings); |
| 60 | if (ret == OPUS_OK) { | 60 | if (ret == OPUS_OK) { |
| 61 | magic = DecodeMultiStreamObjectMagic; | 61 | magic = DecodeMultiStreamObjectMagic; |
| 62 | initialized = true; | 62 | initialized = true; |
| 63 | state_valid = true; | 63 | state_valid = true; |
| 64 | self = this; | 64 | self = this; |
| 65 | final_range = 0; | 65 | final_range = 0; |
| 66 | } | 66 | } |
| 67 | return ret; | 67 | return ret; |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | s32 OpusMultiStreamDecodeObject::Shutdown() { | 70 | s32 OpusMultiStreamDecodeObject::Shutdown() { |
| 71 | if (!state_valid) { | 71 | if (!state_valid) { |
| 72 | return OPUS_INVALID_STATE; | 72 | return OPUS_INVALID_STATE; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | if (initialized) { | 75 | if (initialized) { |
| 76 | magic = 0x0; | 76 | magic = 0x0; |
| 77 | initialized = false; | 77 | initialized = false; |
| 78 | state_valid = false; | 78 | state_valid = false; |
| 79 | self = nullptr; | 79 | self = nullptr; |
| 80 | final_range = 0; | 80 | final_range = 0; |
| 81 | decoder = nullptr; | 81 | decoder = nullptr; |
| 82 | } | 82 | } |
| 83 | return OPUS_OK; | 83 | return OPUS_OK; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | s32 OpusMultiStreamDecodeObject::ResetDecoder() { | 86 | s32 OpusMultiStreamDecodeObject::ResetDecoder() { |
| 87 | return opus_multistream_decoder_ctl(decoder, OPUS_RESET_STATE); | 87 | return opus_multistream_decoder_ctl(decoder, OPUS_RESET_STATE); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | s32 OpusMultiStreamDecodeObject::Decode(u32& out_sample_count, u64 output_data, | 90 | s32 OpusMultiStreamDecodeObject::Decode(u32& out_sample_count, u64 output_data, |
| 91 | u64 output_data_size, u64 input_data, u64 input_data_size) { | 91 | u64 output_data_size, u64 input_data, u64 input_data_size) { |
| 92 | ASSERT(initialized); | 92 | ASSERT(initialized); |
| 93 | out_sample_count = 0; | 93 | out_sample_count = 0; |
| 94 | 94 | ||
| 95 | if (!state_valid) { | 95 | if (!state_valid) { |
| 96 | return OPUS_INVALID_STATE; | 96 | return OPUS_INVALID_STATE; |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | auto ret_code_or_samples = opus_multistream_decode( | 99 | auto ret_code_or_samples = opus_multistream_decode( |
| 100 | decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size), | 100 | decoder, reinterpret_cast<const u8*>(input_data), static_cast<opus_int32>(input_data_size), |
| 101 | reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0); | 101 | reinterpret_cast<opus_int16*>(output_data), static_cast<opus_int32>(output_data_size), 0); |
| 102 | 102 | ||
| 103 | if (ret_code_or_samples < OPUS_OK) { | 103 | if (ret_code_or_samples < OPUS_OK) { |
| 104 | return ret_code_or_samples; | 104 | return ret_code_or_samples; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | out_sample_count = ret_code_or_samples; | 107 | out_sample_count = ret_code_or_samples; |
| 108 | return opus_multistream_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range); | 108 | return opus_multistream_decoder_ctl(decoder, OPUS_GET_FINAL_RANGE_REQUEST, &final_range); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | } // namespace AudioCore::ADSP::OpusDecoder | 111 | } // namespace AudioCore::ADSP::OpusDecoder |
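The multistream variant follows the same single-allocation pattern, sized with opus_multistream_decoder_get_size and initialized with an explicit channel-to-stream mapping. A minimal sketch, with assumed example stream counts and mapping rather than values yuzu uses:

    // Hedged sketch: initializing an OpusMSDecoder inside a caller-provided buffer.
    #include <cstdio>
    #include <vector>
    #include <opus_multistream.h>

    int main() {
        const int channels = 2, streams = 2, coupled_streams = 0; // illustrative counts
        const unsigned char mapping[2] = {0, 1}; // channel i reads mono stream i

        std::vector<unsigned char> buffer(
            opus_multistream_decoder_get_size(streams, coupled_streams));
        auto* decoder = reinterpret_cast<OpusMSDecoder*>(buffer.data());

        const int ret = opus_multistream_decoder_init(decoder, 48000, channels,
                                                      streams, coupled_streams, mapping);
        std::printf("init %s\n", ret == OPUS_OK ? "ok" : "failed");
        return 0;
    }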
diff --git a/src/audio_core/opus/decoder.cpp b/src/audio_core/opus/decoder.cpp index 5b23fce14..c6fd45f47 100644 --- a/src/audio_core/opus/decoder.cpp +++ b/src/audio_core/opus/decoder.cpp | |||
| @@ -1,179 +1,179 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "audio_core/opus/decoder.h" | 4 | #include "audio_core/opus/decoder.h" |
| 5 | #include "audio_core/opus/hardware_opus.h" | 5 | #include "audio_core/opus/hardware_opus.h" |
| 6 | #include "audio_core/opus/parameters.h" | 6 | #include "audio_core/opus/parameters.h" |
| 7 | #include "common/alignment.h" | 7 | #include "common/alignment.h" |
| 8 | #include "common/swap.h" | 8 | #include "common/swap.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | 10 | ||
| 11 | namespace AudioCore::OpusDecoder { | 11 | namespace AudioCore::OpusDecoder { |
| 12 | using namespace Service::Audio; | 12 | using namespace Service::Audio; |
| 13 | namespace { | 13 | namespace { |
| 14 | OpusPacketHeader ReverseHeader(OpusPacketHeader header) { | 14 | OpusPacketHeader ReverseHeader(OpusPacketHeader header) { |
| 15 | OpusPacketHeader out; | 15 | OpusPacketHeader out; |
| 16 | out.size = Common::swap32(header.size); | 16 | out.size = Common::swap32(header.size); |
| 17 | out.final_range = Common::swap32(header.final_range); | 17 | out.final_range = Common::swap32(header.final_range); |
| 18 | return out; | 18 | return out; |
| 19 | } | 19 | } |
| 20 | } // namespace | 20 | } // namespace |
| 21 | 21 | ||
| 22 | OpusDecoder::OpusDecoder(Core::System& system_, HardwareOpus& hardware_opus_) | 22 | OpusDecoder::OpusDecoder(Core::System& system_, HardwareOpus& hardware_opus_) |
| 23 | : system{system_}, hardware_opus{hardware_opus_} {} | 23 | : system{system_}, hardware_opus{hardware_opus_} {} |
| 24 | 24 | ||
| 25 | OpusDecoder::~OpusDecoder() { | 25 | OpusDecoder::~OpusDecoder() { |
| 26 | if (decode_object_initialized) { | 26 | if (decode_object_initialized) { |
| 27 | hardware_opus.ShutdownDecodeObject(shared_buffer.get(), shared_buffer_size); | 27 | hardware_opus.ShutdownDecodeObject(shared_buffer.get(), shared_buffer_size); |
| 28 | } | 28 | } |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | Result OpusDecoder::Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory, | 31 | Result OpusDecoder::Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory, |
| 32 | u64 transfer_memory_size) { | 32 | u64 transfer_memory_size) { |
| 33 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; | 33 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; |
| 34 | shared_buffer_size = transfer_memory_size; | 34 | shared_buffer_size = transfer_memory_size; |
| 35 | shared_buffer = std::make_unique<u8[]>(shared_buffer_size); | 35 | shared_buffer = std::make_unique<u8[]>(shared_buffer_size); |
| 36 | shared_memory_mapped = true; | 36 | shared_memory_mapped = true; |
| 37 | 37 | ||
| 38 | buffer_size = | 38 | buffer_size = |
| 39 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16); | 39 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16); |
| 40 | 40 | ||
| 41 | out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size}; | 41 | out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size}; |
| 42 | size_t in_data_size{0x600u}; | 42 | size_t in_data_size{0x600u}; |
| 43 | in_data = {out_data.data() - in_data_size, in_data_size}; | 43 | in_data = {out_data.data() - in_data_size, in_data_size}; |
| 44 | 44 | ||
| 45 | ON_RESULT_FAILURE { | 45 | ON_RESULT_FAILURE { |
| 46 | if (shared_memory_mapped) { | 46 | if (shared_memory_mapped) { |
| 47 | shared_memory_mapped = false; | 47 | shared_memory_mapped = false; |
| 48 | ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size))); | 48 | ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size))); |
| 49 | } | 49 | } |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | R_TRY(hardware_opus.InitializeDecodeObject(params.sample_rate, params.channel_count, | 52 | R_TRY(hardware_opus.InitializeDecodeObject(params.sample_rate, params.channel_count, |
| 53 | shared_buffer.get(), shared_buffer_size)); | 53 | shared_buffer.get(), shared_buffer_size)); |
| 54 | 54 | ||
| 55 | sample_rate = params.sample_rate; | 55 | sample_rate = params.sample_rate; |
| 56 | channel_count = params.channel_count; | 56 | channel_count = params.channel_count; |
| 57 | use_large_frame_size = params.use_large_frame_size; | 57 | use_large_frame_size = params.use_large_frame_size; |
| 58 | decode_object_initialized = true; | 58 | decode_object_initialized = true; |
| 59 | R_SUCCEED(); | 59 | R_SUCCEED(); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | Result OpusDecoder::Initialize(OpusMultiStreamParametersEx& params, | 62 | Result OpusDecoder::Initialize(OpusMultiStreamParametersEx& params, |
| 63 | Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size) { | 63 | Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size) { |
| 64 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; | 64 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; |
| 65 | shared_buffer_size = transfer_memory_size; | 65 | shared_buffer_size = transfer_memory_size; |
| 66 | shared_buffer = std::make_unique<u8[]>(shared_buffer_size); | 66 | shared_buffer = std::make_unique<u8[]>(shared_buffer_size); |
| 67 | shared_memory_mapped = true; | 67 | shared_memory_mapped = true; |
| 68 | 68 | ||
| 69 | buffer_size = | 69 | buffer_size = |
| 70 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16); | 70 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 16); |
| 71 | 71 | ||
| 72 | out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size}; | 72 | out_data = {shared_buffer.get() + shared_buffer_size - buffer_size, buffer_size}; |
| 73 | size_t in_data_size{Common::AlignUp(1500ull * params.total_stream_count, 64u)}; | 73 | size_t in_data_size{Common::AlignUp(1500ull * params.total_stream_count, 64u)}; |
| 74 | in_data = {out_data.data() - in_data_size, in_data_size}; | 74 | in_data = {out_data.data() - in_data_size, in_data_size}; |
| 75 | 75 | ||
| 76 | ON_RESULT_FAILURE { | 76 | ON_RESULT_FAILURE { |
| 77 | if (shared_memory_mapped) { | 77 | if (shared_memory_mapped) { |
| 78 | shared_memory_mapped = false; | 78 | shared_memory_mapped = false; |
| 79 | ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size))); | 79 | ASSERT(R_SUCCEEDED(hardware_opus.UnmapMemory(shared_buffer.get(), shared_buffer_size))); |
| 80 | } | 80 | } |
| 81 | }; | 81 | }; |
| 82 | 82 | ||
| 83 | R_TRY(hardware_opus.InitializeMultiStreamDecodeObject( | 83 | R_TRY(hardware_opus.InitializeMultiStreamDecodeObject( |
| 84 | params.sample_rate, params.channel_count, params.total_stream_count, | 84 | params.sample_rate, params.channel_count, params.total_stream_count, |
| 85 | params.stereo_stream_count, params.mappings.data(), shared_buffer.get(), | 85 | params.stereo_stream_count, params.mappings.data(), shared_buffer.get(), |
| 86 | shared_buffer_size)); | 86 | shared_buffer_size)); |
| 87 | 87 | ||
| 88 | sample_rate = params.sample_rate; | 88 | sample_rate = params.sample_rate; |
| 89 | channel_count = params.channel_count; | 89 | channel_count = params.channel_count; |
| 90 | total_stream_count = params.total_stream_count; | 90 | total_stream_count = params.total_stream_count; |
| 91 | stereo_stream_count = params.stereo_stream_count; | 91 | stereo_stream_count = params.stereo_stream_count; |
| 92 | use_large_frame_size = params.use_large_frame_size; | 92 | use_large_frame_size = params.use_large_frame_size; |
| 93 | decode_object_initialized = true; | 93 | decode_object_initialized = true; |
| 94 | R_SUCCEED(); | 94 | R_SUCCEED(); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | Result OpusDecoder::DecodeInterleaved(u32* out_data_size, u64* out_time_taken, | 97 | Result OpusDecoder::DecodeInterleaved(u32* out_data_size, u64* out_time_taken, |
| 98 | u32* out_sample_count, std::span<const u8> input_data, | 98 | u32* out_sample_count, std::span<const u8> input_data, |
| 99 | std::span<u8> output_data, bool reset) { | 99 | std::span<u8> output_data, bool reset) { |
| 100 | u32 out_samples; | 100 | u32 out_samples; |
| 101 | u64 time_taken{}; | 101 | u64 time_taken{}; |
| 102 | 102 | ||
| 103 | R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall); | 103 | R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall); |
| 104 | 104 | ||
| 105 | auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())}; | 105 | auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())}; |
| 106 | OpusPacketHeader header{ReverseHeader(*header_p)}; | 106 | OpusPacketHeader header{ReverseHeader(*header_p)}; |
| 107 | 107 | ||
| 108 | R_UNLESS(in_data.size_bytes() >= header.size && | 108 | R_UNLESS(in_data.size_bytes() >= header.size && |
| 109 | header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(), | 109 | header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(), |
| 110 | ResultBufferTooSmall); | 110 | ResultBufferTooSmall); |
| 111 | 111 | ||
| 112 | if (!shared_memory_mapped) { | 112 | if (!shared_memory_mapped) { |
| 113 | R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size)); | 113 | R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size)); |
| 114 | shared_memory_mapped = true; | 114 | shared_memory_mapped = true; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size); | 117 | std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size); |
| 118 | 118 | ||
| 119 | R_TRY(hardware_opus.DecodeInterleaved(out_samples, out_data.data(), out_data.size_bytes(), | 119 | R_TRY(hardware_opus.DecodeInterleaved(out_samples, out_data.data(), out_data.size_bytes(), |
| 120 | channel_count, in_data.data(), header.size, | 120 | channel_count, in_data.data(), header.size, |
| 121 | shared_buffer.get(), time_taken, reset)); | 121 | shared_buffer.get(), time_taken, reset)); |
| 122 | 122 | ||
| 123 | std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16)); | 123 | std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16)); |
| 124 | 124 | ||
| 125 | *out_data_size = header.size + sizeof(OpusPacketHeader); | 125 | *out_data_size = header.size + sizeof(OpusPacketHeader); |
| 126 | *out_sample_count = out_samples; | 126 | *out_sample_count = out_samples; |
| 127 | if (out_time_taken) { | 127 | if (out_time_taken) { |
| 128 | *out_time_taken = time_taken / 1000; | 128 | *out_time_taken = time_taken / 1000; |
| 129 | } | 129 | } |
| 130 | R_SUCCEED(); | 130 | R_SUCCEED(); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | Result OpusDecoder::SetContext([[maybe_unused]] std::span<const u8> context) { | 133 | Result OpusDecoder::SetContext([[maybe_unused]] std::span<const u8> context) { |
| 134 | R_SUCCEED_IF(shared_memory_mapped); | 134 | R_SUCCEED_IF(shared_memory_mapped); |
| 135 | shared_memory_mapped = true; | 135 | shared_memory_mapped = true; |
| 136 | R_RETURN(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size)); | 136 | R_RETURN(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size)); |
| 137 | } | 137 | } |
| 138 | 138 | ||
| 139 | Result OpusDecoder::DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken, | 139 | Result OpusDecoder::DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken, |
| 140 | u32* out_sample_count, | 140 | u32* out_sample_count, |
| 141 | std::span<const u8> input_data, | 141 | std::span<const u8> input_data, |
| 142 | std::span<u8> output_data, bool reset) { | 142 | std::span<u8> output_data, bool reset) { |
| 143 | u32 out_samples; | 143 | u32 out_samples; |
| 144 | u64 time_taken{}; | 144 | u64 time_taken{}; |
| 145 | 145 | ||
| 146 | R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall); | 146 | R_UNLESS(input_data.size_bytes() > sizeof(OpusPacketHeader), ResultInputDataTooSmall); |
| 147 | 147 | ||
| 148 | auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())}; | 148 | auto* header_p{reinterpret_cast<const OpusPacketHeader*>(input_data.data())}; |
| 149 | OpusPacketHeader header{ReverseHeader(*header_p)}; | 149 | OpusPacketHeader header{ReverseHeader(*header_p)}; |
| 150 | 150 | ||
| 151 | LOG_TRACE(Service_Audio, "header size 0x{:X} input data size 0x{:X} in_data size 0x{:X}", | 151 | LOG_TRACE(Service_Audio, "header size 0x{:X} input data size 0x{:X} in_data size 0x{:X}", |
| 152 | header.size, input_data.size_bytes(), in_data.size_bytes()); | 152 | header.size, input_data.size_bytes(), in_data.size_bytes()); |
| 153 | 153 | ||
| 154 | R_UNLESS(in_data.size_bytes() >= header.size && | 154 | R_UNLESS(in_data.size_bytes() >= header.size && |
| 155 | header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(), | 155 | header.size + sizeof(OpusPacketHeader) <= input_data.size_bytes(), |
| 156 | ResultBufferTooSmall); | 156 | ResultBufferTooSmall); |
| 157 | 157 | ||
| 158 | if (!shared_memory_mapped) { | 158 | if (!shared_memory_mapped) { |
| 159 | R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size)); | 159 | R_TRY(hardware_opus.MapMemory(shared_buffer.get(), shared_buffer_size)); |
| 160 | shared_memory_mapped = true; | 160 | shared_memory_mapped = true; |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size); | 163 | std::memcpy(in_data.data(), input_data.data() + sizeof(OpusPacketHeader), header.size); |
| 164 | 164 | ||
| 165 | R_TRY(hardware_opus.DecodeInterleavedForMultiStream( | 165 | R_TRY(hardware_opus.DecodeInterleavedForMultiStream( |
| 166 | out_samples, out_data.data(), out_data.size_bytes(), channel_count, in_data.data(), | 166 | out_samples, out_data.data(), out_data.size_bytes(), channel_count, in_data.data(), |
| 167 | header.size, shared_buffer.get(), time_taken, reset)); | 167 | header.size, shared_buffer.get(), time_taken, reset)); |
| 168 | 168 | ||
| 169 | std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16)); | 169 | std::memcpy(output_data.data(), out_data.data(), out_samples * channel_count * sizeof(s16)); |
| 170 | 170 | ||
| 171 | *out_data_size = header.size + sizeof(OpusPacketHeader); | 171 | *out_data_size = header.size + sizeof(OpusPacketHeader); |
| 172 | *out_sample_count = out_samples; | 172 | *out_sample_count = out_samples; |
| 173 | if (out_time_taken) { | 173 | if (out_time_taken) { |
| 174 | *out_time_taken = time_taken / 1000; | 174 | *out_time_taken = time_taken / 1000; |
| 175 | } | 175 | } |
| 176 | R_SUCCEED(); | 176 | R_SUCCEED(); |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | } // namespace AudioCore::OpusDecoder | 179 | } // namespace AudioCore::OpusDecoder |
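DecodeInterleaved and its multistream counterpart both expect each input packet to be prefixed with an 8-byte big-endian header (payload size, then the encoder's final range value), which ReverseHeader byte-swaps before use. A self-contained sketch of that framing, assuming a little-endian host as the real code does; swap32 stands in for Common::swap32:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint32_t swap32(uint32_t v) { // unconditional byte swap, like Common::swap32
        return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) | (v << 24);
    }

    struct OpusPacketHeader {
        uint32_t size;        // payload size in bytes, big-endian on the wire
        uint32_t final_range; // encoder final range, big-endian on the wire
    };

    int main() {
        // An 11-byte packet: 8-byte header followed by a 3-byte payload.
        const unsigned char packet[] = {0x00, 0x00, 0x00, 0x03,  // size = 3
                                        0x12, 0x34, 0x56, 0x78,  // final_range
                                        0xAA, 0xBB, 0xCC};
        OpusPacketHeader header;
        std::memcpy(&header, packet, sizeof(header));
        header.size = swap32(header.size);               // what ReverseHeader does
        header.final_range = swap32(header.final_range);
        std::printf("payload: %u bytes, final range: 0x%X\n",
                    header.size, header.final_range);
        return 0;
    }

The size checks in DecodeInterleaved then require header.size to fit both in the staging area (in_data) and within the remaining input span before any copy takes place.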
diff --git a/src/audio_core/opus/decoder.h b/src/audio_core/opus/decoder.h index d08d8a4a4..fd728958a 100644 --- a/src/audio_core/opus/decoder.h +++ b/src/audio_core/opus/decoder.h | |||
| @@ -1,53 +1,53 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <span> | 6 | #include <span> |
| 7 | 7 | ||
| 8 | #include "audio_core/opus/parameters.h" | 8 | #include "audio_core/opus/parameters.h" |
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/hle/kernel/k_transfer_memory.h" | 10 | #include "core/hle/kernel/k_transfer_memory.h" |
| 11 | #include "core/hle/service/audio/errors.h" | 11 | #include "core/hle/service/audio/errors.h" |
| 12 | 12 | ||
| 13 | namespace Core { | 13 | namespace Core { |
| 14 | class System; | 14 | class System; |
| 15 | } | 15 | } |
| 16 | 16 | ||
| 17 | namespace AudioCore::OpusDecoder { | 17 | namespace AudioCore::OpusDecoder { |
| 18 | class HardwareOpus; | 18 | class HardwareOpus; |
| 19 | 19 | ||
| 20 | class OpusDecoder { | 20 | class OpusDecoder { |
| 21 | public: | 21 | public: |
| 22 | explicit OpusDecoder(Core::System& system, HardwareOpus& hardware_opus_); | 22 | explicit OpusDecoder(Core::System& system, HardwareOpus& hardware_opus_); |
| 23 | ~OpusDecoder(); | 23 | ~OpusDecoder(); |
| 24 | 24 | ||
| 25 | Result Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory, | 25 | Result Initialize(OpusParametersEx& params, Kernel::KTransferMemory* transfer_memory, |
| 26 | u64 transfer_memory_size); | 26 | u64 transfer_memory_size); |
| 27 | Result Initialize(OpusMultiStreamParametersEx& params, Kernel::KTransferMemory* transfer_memory, | 27 | Result Initialize(OpusMultiStreamParametersEx& params, Kernel::KTransferMemory* transfer_memory, |
| 28 | u64 transfer_memory_size); | 28 | u64 transfer_memory_size); |
| 29 | Result DecodeInterleaved(u32* out_data_size, u64* out_time_taken, u32* out_sample_count, | 29 | Result DecodeInterleaved(u32* out_data_size, u64* out_time_taken, u32* out_sample_count, |
| 30 | std::span<const u8> input_data, std::span<u8> output_data, bool reset); | 30 | std::span<const u8> input_data, std::span<u8> output_data, bool reset); |
| 31 | Result SetContext([[maybe_unused]] std::span<const u8> context); | 31 | Result SetContext([[maybe_unused]] std::span<const u8> context); |
| 32 | Result DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken, | 32 | Result DecodeInterleavedForMultiStream(u32* out_data_size, u64* out_time_taken, |
| 33 | u32* out_sample_count, std::span<const u8> input_data, | 33 | u32* out_sample_count, std::span<const u8> input_data, |
| 34 | std::span<u8> output_data, bool reset); | 34 | std::span<u8> output_data, bool reset); |
| 35 | 35 | ||
| 36 | private: | 36 | private: |
| 37 | Core::System& system; | 37 | Core::System& system; |
| 38 | HardwareOpus& hardware_opus; | 38 | HardwareOpus& hardware_opus; |
| 39 | std::unique_ptr<u8[]> shared_buffer{}; | 39 | std::unique_ptr<u8[]> shared_buffer{}; |
| 40 | u64 shared_buffer_size; | 40 | u64 shared_buffer_size; |
| 41 | std::span<u8> in_data{}; | 41 | std::span<u8> in_data{}; |
| 42 | std::span<u8> out_data{}; | 42 | std::span<u8> out_data{}; |
| 43 | u64 buffer_size{}; | 43 | u64 buffer_size{}; |
| 44 | s32 sample_rate{}; | 44 | s32 sample_rate{}; |
| 45 | s32 channel_count{}; | 45 | s32 channel_count{}; |
| 46 | bool use_large_frame_size{false}; | 46 | bool use_large_frame_size{false}; |
| 47 | s32 total_stream_count{}; | 47 | s32 total_stream_count{}; |
| 48 | s32 stereo_stream_count{}; | 48 | s32 stereo_stream_count{}; |
| 49 | bool shared_memory_mapped{false}; | 49 | bool shared_memory_mapped{false}; |
| 50 | bool decode_object_initialized{false}; | 50 | bool decode_object_initialized{false}; |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | } // namespace AudioCore::OpusDecoder | 53 | } // namespace AudioCore::OpusDecoder |
diff --git a/src/audio_core/opus/decoder_manager.cpp b/src/audio_core/opus/decoder_manager.cpp index fdeccdf50..1464880a1 100644 --- a/src/audio_core/opus/decoder_manager.cpp +++ b/src/audio_core/opus/decoder_manager.cpp | |||
| @@ -1,102 +1,102 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include "audio_core/adsp/apps/opus/opus_decoder.h" | 4 | #include "audio_core/adsp/apps/opus/opus_decoder.h" |
| 5 | #include "audio_core/opus/decoder_manager.h" | 5 | #include "audio_core/opus/decoder_manager.h" |
| 6 | #include "common/alignment.h" | 6 | #include "common/alignment.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | 8 | ||
| 9 | namespace AudioCore::OpusDecoder { | 9 | namespace AudioCore::OpusDecoder { |
| 10 | using namespace Service::Audio; | 10 | using namespace Service::Audio; |
| 11 | 11 | ||
| 12 | namespace { | 12 | namespace { |
| 13 | bool IsValidChannelCount(u32 channel_count) { | 13 | bool IsValidChannelCount(u32 channel_count) { |
| 14 | return channel_count == 1 || channel_count == 2; | 14 | return channel_count == 1 || channel_count == 2; |
| 15 | } | 15 | } |
| 16 | 16 | ||
| 17 | bool IsValidMultiStreamChannelCount(u32 channel_count) { | 17 | bool IsValidMultiStreamChannelCount(u32 channel_count) { |
| 18 | return channel_count > 0 && channel_count <= OpusStreamCountMax; | 18 | return channel_count > 0 && channel_count <= OpusStreamCountMax; |
| 19 | } | 19 | } |
| 20 | 20 | ||
| 21 | bool IsValidSampleRate(u32 sample_rate) { | 21 | bool IsValidSampleRate(u32 sample_rate) { |
| 22 | return sample_rate == 8'000 || sample_rate == 12'000 || sample_rate == 16'000 || | 22 | return sample_rate == 8'000 || sample_rate == 12'000 || sample_rate == 16'000 || |
| 23 | sample_rate == 24'000 || sample_rate == 48'000; | 23 | sample_rate == 24'000 || sample_rate == 48'000; |
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | bool IsValidStreamCount(u32 channel_count, u32 total_stream_count, u32 stereo_stream_count) { | 26 | bool IsValidStreamCount(u32 channel_count, u32 total_stream_count, u32 stereo_stream_count) { |
| 27 | return total_stream_count > 0 && static_cast<s32>(stereo_stream_count) >= 0 && | 27 | return total_stream_count > 0 && static_cast<s32>(stereo_stream_count) >= 0 && |
| 28 | stereo_stream_count <= total_stream_count && | 28 | stereo_stream_count <= total_stream_count && |
| 29 | total_stream_count + stereo_stream_count <= channel_count; | 29 | total_stream_count + stereo_stream_count <= channel_count; |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | } // namespace | 32 | } // namespace |
| 33 | 33 | ||
| 34 | OpusDecoderManager::OpusDecoderManager(Core::System& system_) | 34 | OpusDecoderManager::OpusDecoderManager(Core::System& system_) |
| 35 | : system{system_}, hardware_opus{system} { | 35 | : system{system_}, hardware_opus{system} { |
| 36 | for (u32 i = 0; i < MaxChannels; i++) { | 36 | for (u32 i = 0; i < MaxChannels; i++) { |
| 37 | required_workbuffer_sizes[i] = hardware_opus.GetWorkBufferSize(1 + i); | 37 | required_workbuffer_sizes[i] = hardware_opus.GetWorkBufferSize(1 + i); |
| 38 | } | 38 | } |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | Result OpusDecoderManager::GetWorkBufferSize(OpusParameters& params, u64& out_size) { | 41 | Result OpusDecoderManager::GetWorkBufferSize(OpusParameters& params, u64& out_size) { |
| 42 | OpusParametersEx ex{ | 42 | OpusParametersEx ex{ |
| 43 | .sample_rate = params.sample_rate, | 43 | .sample_rate = params.sample_rate, |
| 44 | .channel_count = params.channel_count, | 44 | .channel_count = params.channel_count, |
| 45 | .use_large_frame_size = false, | 45 | .use_large_frame_size = false, |
| 46 | }; | 46 | }; |
| 47 | R_RETURN(GetWorkBufferSizeExEx(ex, out_size)); | 47 | R_RETURN(GetWorkBufferSizeExEx(ex, out_size)); |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | Result OpusDecoderManager::GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size) { | 50 | Result OpusDecoderManager::GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size) { |
| 51 | R_RETURN(GetWorkBufferSizeExEx(params, out_size)); | 51 | R_RETURN(GetWorkBufferSizeExEx(params, out_size)); |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | Result OpusDecoderManager::GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size) { | 54 | Result OpusDecoderManager::GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size) { |
| 55 | R_UNLESS(IsValidChannelCount(params.channel_count), ResultInvalidOpusChannelCount); | 55 | R_UNLESS(IsValidChannelCount(params.channel_count), ResultInvalidOpusChannelCount); |
| 56 | R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate); | 56 | R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate); |
| 57 | 57 | ||
| 58 | auto work_buffer_size{required_workbuffer_sizes[params.channel_count - 1]}; | 58 | auto work_buffer_size{required_workbuffer_sizes[params.channel_count - 1]}; |
| 59 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; | 59 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; |
| 60 | work_buffer_size += | 60 | work_buffer_size += |
| 61 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64); | 61 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64); |
| 62 | out_size = work_buffer_size + 0x600; | 62 | out_size = work_buffer_size + 0x600; |
| 63 | R_SUCCEED(); | 63 | R_SUCCEED(); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | Result OpusDecoderManager::GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params, | 66 | Result OpusDecoderManager::GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params, |
| 67 | u64& out_size) { | 67 | u64& out_size) { |
| 68 | OpusMultiStreamParametersEx ex{ | 68 | OpusMultiStreamParametersEx ex{ |
| 69 | .sample_rate = params.sample_rate, | 69 | .sample_rate = params.sample_rate, |
| 70 | .channel_count = params.channel_count, | 70 | .channel_count = params.channel_count, |
| 71 | .total_stream_count = params.total_stream_count, | 71 | .total_stream_count = params.total_stream_count, |
| 72 | .stereo_stream_count = params.stereo_stream_count, | 72 | .stereo_stream_count = params.stereo_stream_count, |
| 73 | .use_large_frame_size = false, | 73 | .use_large_frame_size = false, |
| 74 | .mappings = {}, | 74 | .mappings = {}, |
| 75 | }; | 75 | }; |
| 76 | R_RETURN(GetWorkBufferSizeForMultiStreamExEx(ex, out_size)); | 76 | R_RETURN(GetWorkBufferSizeForMultiStreamExEx(ex, out_size)); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params, | 79 | Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params, |
| 80 | u64& out_size) { | 80 | u64& out_size) { |
| 81 | R_RETURN(GetWorkBufferSizeForMultiStreamExEx(params, out_size)); | 81 | R_RETURN(GetWorkBufferSizeForMultiStreamExEx(params, out_size)); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params, | 84 | Result OpusDecoderManager::GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params, |
| 85 | u64& out_size) { | 85 | u64& out_size) { |
| 86 | R_UNLESS(IsValidMultiStreamChannelCount(params.channel_count), ResultInvalidOpusChannelCount); | 86 | R_UNLESS(IsValidMultiStreamChannelCount(params.channel_count), ResultInvalidOpusChannelCount); |
| 87 | R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate); | 87 | R_UNLESS(IsValidSampleRate(params.sample_rate), ResultInvalidOpusSampleRate); |
| 88 | R_UNLESS(IsValidStreamCount(params.channel_count, params.total_stream_count, | 88 | R_UNLESS(IsValidStreamCount(params.channel_count, params.total_stream_count, |
| 89 | params.stereo_stream_count), | 89 | params.stereo_stream_count), |
| 90 | ResultInvalidOpusSampleRate); | 90 | ResultInvalidOpusSampleRate); |
| 91 | 91 | ||
| 92 | auto work_buffer_size{hardware_opus.GetWorkBufferSizeForMultiStream( | 92 | auto work_buffer_size{hardware_opus.GetWorkBufferSizeForMultiStream( |
| 93 | params.total_stream_count, params.stereo_stream_count)}; | 93 | params.total_stream_count, params.stereo_stream_count)}; |
| 94 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; | 94 | auto frame_size{params.use_large_frame_size ? 5760 : 1920}; |
| 95 | work_buffer_size += Common::AlignUp(1500 * params.total_stream_count, 64); | 95 | work_buffer_size += Common::AlignUp(1500 * params.total_stream_count, 64); |
| 96 | work_buffer_size += | 96 | work_buffer_size += |
| 97 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64); | 97 | Common::AlignUp((frame_size * params.channel_count) / (48'000 / params.sample_rate), 64); |
| 98 | out_size = work_buffer_size; | 98 | out_size = work_buffer_size; |
| 99 | R_SUCCEED(); | 99 | R_SUCCEED(); |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | } // namespace AudioCore::OpusDecoder | 102 | } // namespace AudioCore::OpusDecoder |
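The single-stream sizing above reduces to a closed-form computation: the per-channel base size queried from the ADSP at construction, plus a 64-byte-aligned scratch region scaled by frame size, channel count, and sample rate, plus a fixed 0x600 tail. A minimal standalone sketch of that math, assuming only the constants visible in the diff (AlignUp stands in for Common::AlignUp):

    #include <cstdint>

    // Stand-in for Common::AlignUp (power-of-two alignment).
    constexpr uint64_t AlignUp(uint64_t value, uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    // base_size models required_workbuffer_sizes[channel_count - 1], which the
    // real manager obtains from the ADSP once in its constructor.
    constexpr uint64_t SingleStreamWorkBufferSize(uint64_t base_size, uint32_t channel_count,
                                                  uint32_t sample_rate, bool use_large_frame_size) {
        const uint64_t frame_size = use_large_frame_size ? 5760 : 1920;
        return base_size +
               AlignUp((frame_size * channel_count) / (48'000 / sample_rate), 64) + 0x600;
    }

    // 48 kHz stereo with normal frames: the scratch area is already 64-byte aligned.
    static_assert(SingleStreamWorkBufferSize(0, 2, 48'000, false) == 1920 * 2 + 0x600);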
diff --git a/src/audio_core/opus/decoder_manager.h b/src/audio_core/opus/decoder_manager.h index 466e1967b..70ebc4bab 100644 --- a/src/audio_core/opus/decoder_manager.h +++ b/src/audio_core/opus/decoder_manager.h | |||
| @@ -1,38 +1,38 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include "audio_core/opus/hardware_opus.h" | 6 | #include "audio_core/opus/hardware_opus.h" |
| 7 | #include "audio_core/opus/parameters.h" | 7 | #include "audio_core/opus/parameters.h" |
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "core/hle/service/audio/errors.h" | 9 | #include "core/hle/service/audio/errors.h" |
| 10 | 10 | ||
| 11 | namespace Core { | 11 | namespace Core { |
| 12 | class System; | 12 | class System; |
| 13 | } | 13 | } |
| 14 | 14 | ||
| 15 | namespace AudioCore::OpusDecoder { | 15 | namespace AudioCore::OpusDecoder { |
| 16 | 16 | ||
| 17 | class OpusDecoderManager { | 17 | class OpusDecoderManager { |
| 18 | public: | 18 | public: |
| 19 | OpusDecoderManager(Core::System& system); | 19 | OpusDecoderManager(Core::System& system); |
| 20 | 20 | ||
| 21 | HardwareOpus& GetHardwareOpus() { | 21 | HardwareOpus& GetHardwareOpus() { |
| 22 | return hardware_opus; | 22 | return hardware_opus; |
| 23 | } | 23 | } |
| 24 | 24 | ||
| 25 | Result GetWorkBufferSize(OpusParameters& params, u64& out_size); | 25 | Result GetWorkBufferSize(OpusParameters& params, u64& out_size); |
| 26 | Result GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size); | 26 | Result GetWorkBufferSizeEx(OpusParametersEx& params, u64& out_size); |
| 27 | Result GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size); | 27 | Result GetWorkBufferSizeExEx(OpusParametersEx& params, u64& out_size); |
| 28 | Result GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params, u64& out_size); | 28 | Result GetWorkBufferSizeForMultiStream(OpusMultiStreamParameters& params, u64& out_size); |
| 29 | Result GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params, u64& out_size); | 29 | Result GetWorkBufferSizeForMultiStreamEx(OpusMultiStreamParametersEx& params, u64& out_size); |
| 30 | Result GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params, u64& out_size); | 30 | Result GetWorkBufferSizeForMultiStreamExEx(OpusMultiStreamParametersEx& params, u64& out_size); |
| 31 | 31 | ||
| 32 | private: | 32 | private: |
| 33 | Core::System& system; | 33 | Core::System& system; |
| 34 | HardwareOpus hardware_opus; | 34 | HardwareOpus hardware_opus; |
| 35 | std::array<u64, MaxChannels> required_workbuffer_sizes{}; | 35 | std::array<u64, MaxChannels> required_workbuffer_sizes{}; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | } // namespace AudioCore::OpusDecoder | 38 | } // namespace AudioCore::OpusDecoder |
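A hypothetical call site for the non-Ex entry point declared above; the manager fills in use_large_frame_size = false before forwarding everything to GetWorkBufferSizeExEx, so callers supply only the sample rate and channel count. The `manager` instance and the literal values are assumptions:

    // Hypothetical usage; `manager` is an OpusDecoderManager instance.
    OpusParameters params{};
    params.sample_rate = 48'000;
    params.channel_count = 2;

    u64 work_buffer_size{};
    if (manager.GetWorkBufferSize(params, work_buffer_size).IsSuccess()) {
        // work_buffer_size includes the 0x600 tail added by GetWorkBufferSizeExEx.
    }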
diff --git a/src/audio_core/opus/hardware_opus.cpp b/src/audio_core/opus/hardware_opus.cpp index d6544dcb0..5ff71ab2d 100644 --- a/src/audio_core/opus/hardware_opus.cpp +++ b/src/audio_core/opus/hardware_opus.cpp | |||
| @@ -1,241 +1,241 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #include <array> | 4 | #include <array> |
| 5 | 5 | ||
| 6 | #include "audio_core/audio_core.h" | 6 | #include "audio_core/audio_core.h" |
| 7 | #include "audio_core/opus/hardware_opus.h" | 7 | #include "audio_core/opus/hardware_opus.h" |
| 8 | #include "core/core.h" | 8 | #include "core/core.h" |
| 9 | 9 | ||
| 10 | namespace AudioCore::OpusDecoder { | 10 | namespace AudioCore::OpusDecoder { |
| 11 | namespace { | 11 | namespace { |
| 12 | using namespace Service::Audio; | 12 | using namespace Service::Audio; |
| 13 | 13 | ||
| 14 | static constexpr Result ResultCodeFromLibOpusErrorCode(u64 error_code) { | 14 | static constexpr Result ResultCodeFromLibOpusErrorCode(u64 error_code) { |
| 15 | s32 error{static_cast<s32>(error_code)}; | 15 | s32 error{static_cast<s32>(error_code)}; |
| 16 | ASSERT(error <= OPUS_OK); | 16 | ASSERT(error <= OPUS_OK); |
| 17 | switch (error) { | 17 | switch (error) { |
| 18 | case OPUS_ALLOC_FAIL: | 18 | case OPUS_ALLOC_FAIL: |
| 19 | R_THROW(ResultLibOpusAllocFail); | 19 | R_THROW(ResultLibOpusAllocFail); |
| 20 | case OPUS_INVALID_STATE: | 20 | case OPUS_INVALID_STATE: |
| 21 | R_THROW(ResultLibOpusInvalidState); | 21 | R_THROW(ResultLibOpusInvalidState); |
| 22 | case OPUS_UNIMPLEMENTED: | 22 | case OPUS_UNIMPLEMENTED: |
| 23 | R_THROW(ResultLibOpusUnimplemented); | 23 | R_THROW(ResultLibOpusUnimplemented); |
| 24 | case OPUS_INVALID_PACKET: | 24 | case OPUS_INVALID_PACKET: |
| 25 | R_THROW(ResultLibOpusInvalidPacket); | 25 | R_THROW(ResultLibOpusInvalidPacket); |
| 26 | case OPUS_INTERNAL_ERROR: | 26 | case OPUS_INTERNAL_ERROR: |
| 27 | R_THROW(ResultLibOpusInternalError); | 27 | R_THROW(ResultLibOpusInternalError); |
| 28 | case OPUS_BUFFER_TOO_SMALL: | 28 | case OPUS_BUFFER_TOO_SMALL: |
| 29 | R_THROW(ResultBufferTooSmall); | 29 | R_THROW(ResultBufferTooSmall); |
| 30 | case OPUS_BAD_ARG: | 30 | case OPUS_BAD_ARG: |
| 31 | R_THROW(ResultLibOpusBadArg); | 31 | R_THROW(ResultLibOpusBadArg); |
| 32 | case OPUS_OK: | 32 | case OPUS_OK: |
| 33 | R_RETURN(ResultSuccess); | 33 | R_RETURN(ResultSuccess); |
| 34 | } | 34 | } |
| 35 | UNREACHABLE(); | 35 | UNREACHABLE(); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | } // namespace | 38 | } // namespace |
| 39 | 39 | ||
| 40 | HardwareOpus::HardwareOpus(Core::System& system_) | 40 | HardwareOpus::HardwareOpus(Core::System& system_) |
| 41 | : system{system_}, opus_decoder{system.AudioCore().ADSP().OpusDecoder()} { | 41 | : system{system_}, opus_decoder{system.AudioCore().ADSP().OpusDecoder()} { |
| 42 | opus_decoder.SetSharedMemory(shared_memory); | 42 | opus_decoder.SetSharedMemory(shared_memory); |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | u64 HardwareOpus::GetWorkBufferSize(u32 channel) { | 45 | u64 HardwareOpus::GetWorkBufferSize(u32 channel) { |
| 46 | if (!opus_decoder.IsRunning()) { | 46 | if (!opus_decoder.IsRunning()) { |
| 47 | return 0; | 47 | return 0; |
| 48 | } | 48 | } |
| 49 | std::scoped_lock l{mutex}; | 49 | std::scoped_lock l{mutex}; |
| 50 | shared_memory.host_send_data[0] = channel; | 50 | shared_memory.host_send_data[0] = channel; |
| 51 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::GetWorkBufferSize); | 51 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::GetWorkBufferSize); |
| 52 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 52 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 53 | if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeOK) { | 53 | if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeOK) { |
| 54 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 54 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 55 | ADSP::OpusDecoder::Message::GetWorkBufferSizeOK, msg); | 55 | ADSP::OpusDecoder::Message::GetWorkBufferSizeOK, msg); |
| 56 | return 0; | 56 | return 0; |
| 57 | } | 57 | } |
| 58 | return shared_memory.dsp_return_data[0]; | 58 | return shared_memory.dsp_return_data[0]; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | u64 HardwareOpus::GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count) { | 61 | u64 HardwareOpus::GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count) { |
| 62 | std::scoped_lock l{mutex}; | 62 | std::scoped_lock l{mutex}; |
| 63 | shared_memory.host_send_data[0] = total_stream_count; | 63 | shared_memory.host_send_data[0] = total_stream_count; |
| 64 | shared_memory.host_send_data[1] = stereo_stream_count; | 64 | shared_memory.host_send_data[1] = stereo_stream_count; |
| 65 | opus_decoder.Send(ADSP::Direction::DSP, | 65 | opus_decoder.Send(ADSP::Direction::DSP, |
| 66 | ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStream); | 66 | ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStream); |
| 67 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 67 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 68 | if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK) { | 68 | if (msg != ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK) { |
| 69 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 69 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 70 | ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK, msg); | 70 | ADSP::OpusDecoder::Message::GetWorkBufferSizeForMultiStreamOK, msg); |
| 71 | return 0; | 71 | return 0; |
| 72 | } | 72 | } |
| 73 | return shared_memory.dsp_return_data[0]; | 73 | return shared_memory.dsp_return_data[0]; |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | Result HardwareOpus::InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer, | 76 | Result HardwareOpus::InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer, |
| 77 | u64 buffer_size) { | 77 | u64 buffer_size) { |
| 78 | std::scoped_lock l{mutex}; | 78 | std::scoped_lock l{mutex}; |
| 79 | shared_memory.host_send_data[0] = (u64)buffer; | 79 | shared_memory.host_send_data[0] = (u64)buffer; |
| 80 | shared_memory.host_send_data[1] = buffer_size; | 80 | shared_memory.host_send_data[1] = buffer_size; |
| 81 | shared_memory.host_send_data[2] = sample_rate; | 81 | shared_memory.host_send_data[2] = sample_rate; |
| 82 | shared_memory.host_send_data[3] = channel_count; | 82 | shared_memory.host_send_data[3] = channel_count; |
| 83 | 83 | ||
| 84 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::InitializeDecodeObject); | 84 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::InitializeDecodeObject); |
| 85 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 85 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 86 | if (msg != ADSP::OpusDecoder::Message::InitializeDecodeObjectOK) { | 86 | if (msg != ADSP::OpusDecoder::Message::InitializeDecodeObjectOK) { |
| 87 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 87 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 88 | ADSP::OpusDecoder::Message::InitializeDecodeObjectOK, msg); | 88 | ADSP::OpusDecoder::Message::InitializeDecodeObjectOK, msg); |
| 89 | R_THROW(ResultInvalidOpusDSPReturnCode); | 89 | R_THROW(ResultInvalidOpusDSPReturnCode); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); | 92 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); |
| 93 | } | 93 | } |
| 94 | 94 | ||
| 95 | Result HardwareOpus::InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count, | 95 | Result HardwareOpus::InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count, |
| 96 | u32 total_stream_count, | 96 | u32 total_stream_count, |
| 97 | u32 stereo_stream_count, void* mappings, | 97 | u32 stereo_stream_count, void* mappings, |
| 98 | void* buffer, u64 buffer_size) { | 98 | void* buffer, u64 buffer_size) { |
| 99 | std::scoped_lock l{mutex}; | 99 | std::scoped_lock l{mutex}; |
| 100 | shared_memory.host_send_data[0] = (u64)buffer; | 100 | shared_memory.host_send_data[0] = (u64)buffer; |
| 101 | shared_memory.host_send_data[1] = buffer_size; | 101 | shared_memory.host_send_data[1] = buffer_size; |
| 102 | shared_memory.host_send_data[2] = sample_rate; | 102 | shared_memory.host_send_data[2] = sample_rate; |
| 103 | shared_memory.host_send_data[3] = channel_count; | 103 | shared_memory.host_send_data[3] = channel_count; |
| 104 | shared_memory.host_send_data[4] = total_stream_count; | 104 | shared_memory.host_send_data[4] = total_stream_count; |
| 105 | shared_memory.host_send_data[5] = stereo_stream_count; | 105 | shared_memory.host_send_data[5] = stereo_stream_count; |
| 106 | 106 | ||
| 107 | ASSERT(channel_count <= MaxChannels); | 107 | ASSERT(channel_count <= MaxChannels); |
| 108 | std::memcpy(shared_memory.channel_mapping.data(), mappings, channel_count * sizeof(u8)); | 108 | std::memcpy(shared_memory.channel_mapping.data(), mappings, channel_count * sizeof(u8)); |
| 109 | 109 | ||
| 110 | opus_decoder.Send(ADSP::Direction::DSP, | 110 | opus_decoder.Send(ADSP::Direction::DSP, |
| 111 | ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObject); | 111 | ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObject); |
| 112 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 112 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 113 | if (msg != ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK) { | 113 | if (msg != ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK) { |
| 114 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 114 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 115 | ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK, msg); | 115 | ADSP::OpusDecoder::Message::InitializeMultiStreamDecodeObjectOK, msg); |
| 116 | R_THROW(ResultInvalidOpusDSPReturnCode); | 116 | R_THROW(ResultInvalidOpusDSPReturnCode); |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); | 119 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | Result HardwareOpus::ShutdownDecodeObject(void* buffer, u64 buffer_size) { | 122 | Result HardwareOpus::ShutdownDecodeObject(void* buffer, u64 buffer_size) { |
| 123 | std::scoped_lock l{mutex}; | 123 | std::scoped_lock l{mutex}; |
| 124 | shared_memory.host_send_data[0] = (u64)buffer; | 124 | shared_memory.host_send_data[0] = (u64)buffer; |
| 125 | shared_memory.host_send_data[1] = buffer_size; | 125 | shared_memory.host_send_data[1] = buffer_size; |
| 126 | 126 | ||
| 127 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::ShutdownDecodeObject); | 127 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::ShutdownDecodeObject); |
| 128 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 128 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 129 | ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK, | 129 | ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK, |
| 130 | "Expected Opus shutdown code {}, got {}", | 130 | "Expected Opus shutdown code {}, got {}", |
| 131 | ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK, msg); | 131 | ADSP::OpusDecoder::Message::ShutdownDecodeObjectOK, msg); |
| 132 | 132 | ||
| 133 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); | 133 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | Result HardwareOpus::ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size) { | 136 | Result HardwareOpus::ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size) { |
| 137 | std::scoped_lock l{mutex}; | 137 | std::scoped_lock l{mutex}; |
| 138 | shared_memory.host_send_data[0] = (u64)buffer; | 138 | shared_memory.host_send_data[0] = (u64)buffer; |
| 139 | shared_memory.host_send_data[1] = buffer_size; | 139 | shared_memory.host_send_data[1] = buffer_size; |
| 140 | 140 | ||
| 141 | opus_decoder.Send(ADSP::Direction::DSP, | 141 | opus_decoder.Send(ADSP::Direction::DSP, |
| 142 | ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObject); | 142 | ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObject); |
| 143 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 143 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 144 | ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK, | 144 | ASSERT_MSG(msg == ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK, |
| 145 | "Expected Opus shutdown code {}, got {}", | 145 | "Expected Opus shutdown code {}, got {}", |
| 146 | ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK, msg); | 146 | ADSP::OpusDecoder::Message::ShutdownMultiStreamDecodeObjectOK, msg); |
| 147 | 147 | ||
| 148 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); | 148 | R_RETURN(ResultCodeFromLibOpusErrorCode(shared_memory.dsp_return_data[0])); |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | Result HardwareOpus::DecodeInterleaved(u32& out_sample_count, void* output_data, | 151 | Result HardwareOpus::DecodeInterleaved(u32& out_sample_count, void* output_data, |
| 152 | u64 output_data_size, u32 channel_count, void* input_data, | 152 | u64 output_data_size, u32 channel_count, void* input_data, |
| 153 | u64 input_data_size, void* buffer, u64& out_time_taken, | 153 | u64 input_data_size, void* buffer, u64& out_time_taken, |
| 154 | bool reset) { | 154 | bool reset) { |
| 155 | std::scoped_lock l{mutex}; | 155 | std::scoped_lock l{mutex}; |
| 156 | shared_memory.host_send_data[0] = (u64)buffer; | 156 | shared_memory.host_send_data[0] = (u64)buffer; |
| 157 | shared_memory.host_send_data[1] = (u64)input_data; | 157 | shared_memory.host_send_data[1] = (u64)input_data; |
| 158 | shared_memory.host_send_data[2] = input_data_size; | 158 | shared_memory.host_send_data[2] = input_data_size; |
| 159 | shared_memory.host_send_data[3] = (u64)output_data; | 159 | shared_memory.host_send_data[3] = (u64)output_data; |
| 160 | shared_memory.host_send_data[4] = output_data_size; | 160 | shared_memory.host_send_data[4] = output_data_size; |
| 161 | shared_memory.host_send_data[5] = 0; | 161 | shared_memory.host_send_data[5] = 0; |
| 162 | shared_memory.host_send_data[6] = reset; | 162 | shared_memory.host_send_data[6] = reset; |
| 163 | 163 | ||
| 164 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::DecodeInterleaved); | 164 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::DecodeInterleaved); |
| 165 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 165 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 166 | if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedOK) { | 166 | if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedOK) { |
| 167 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 167 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 168 | ADSP::OpusDecoder::Message::DecodeInterleavedOK, msg); | 168 | ADSP::OpusDecoder::Message::DecodeInterleavedOK, msg); |
| 169 | R_THROW(ResultInvalidOpusDSPReturnCode); | 169 | R_THROW(ResultInvalidOpusDSPReturnCode); |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])}; | 172 | auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])}; |
| 173 | if (error_code == OPUS_OK) { | 173 | if (error_code == OPUS_OK) { |
| 174 | out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]); | 174 | out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]); |
| 175 | out_time_taken = 1000 * shared_memory.dsp_return_data[2]; | 175 | out_time_taken = 1000 * shared_memory.dsp_return_data[2]; |
| 176 | } | 176 | } |
| 177 | R_RETURN(ResultCodeFromLibOpusErrorCode(error_code)); | 177 | R_RETURN(ResultCodeFromLibOpusErrorCode(error_code)); |
| 178 | } | 178 | } |
| 179 | 179 | ||
| 180 | Result HardwareOpus::DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data, | 180 | Result HardwareOpus::DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data, |
| 181 | u64 output_data_size, u32 channel_count, | 181 | u64 output_data_size, u32 channel_count, |
| 182 | void* input_data, u64 input_data_size, | 182 | void* input_data, u64 input_data_size, |
| 183 | void* buffer, u64& out_time_taken, | 183 | void* buffer, u64& out_time_taken, |
| 184 | bool reset) { | 184 | bool reset) { |
| 185 | std::scoped_lock l{mutex}; | 185 | std::scoped_lock l{mutex}; |
| 186 | shared_memory.host_send_data[0] = (u64)buffer; | 186 | shared_memory.host_send_data[0] = (u64)buffer; |
| 187 | shared_memory.host_send_data[1] = (u64)input_data; | 187 | shared_memory.host_send_data[1] = (u64)input_data; |
| 188 | shared_memory.host_send_data[2] = input_data_size; | 188 | shared_memory.host_send_data[2] = input_data_size; |
| 189 | shared_memory.host_send_data[3] = (u64)output_data; | 189 | shared_memory.host_send_data[3] = (u64)output_data; |
| 190 | shared_memory.host_send_data[4] = output_data_size; | 190 | shared_memory.host_send_data[4] = output_data_size; |
| 191 | shared_memory.host_send_data[5] = 0; | 191 | shared_memory.host_send_data[5] = 0; |
| 192 | shared_memory.host_send_data[6] = reset; | 192 | shared_memory.host_send_data[6] = reset; |
| 193 | 193 | ||
| 194 | opus_decoder.Send(ADSP::Direction::DSP, | 194 | opus_decoder.Send(ADSP::Direction::DSP, |
| 195 | ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStream); | 195 | ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStream); |
| 196 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 196 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 197 | if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK) { | 197 | if (msg != ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK) { |
| 198 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 198 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 199 | ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK, msg); | 199 | ADSP::OpusDecoder::Message::DecodeInterleavedForMultiStreamOK, msg); |
| 200 | R_THROW(ResultInvalidOpusDSPReturnCode); | 200 | R_THROW(ResultInvalidOpusDSPReturnCode); |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])}; | 203 | auto error_code{static_cast<s32>(shared_memory.dsp_return_data[0])}; |
| 204 | if (error_code == OPUS_OK) { | 204 | if (error_code == OPUS_OK) { |
| 205 | out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]); | 205 | out_sample_count = static_cast<u32>(shared_memory.dsp_return_data[1]); |
| 206 | out_time_taken = 1000 * shared_memory.dsp_return_data[2]; | 206 | out_time_taken = 1000 * shared_memory.dsp_return_data[2]; |
| 207 | } | 207 | } |
| 208 | R_RETURN(ResultCodeFromLibOpusErrorCode(error_code)); | 208 | R_RETURN(ResultCodeFromLibOpusErrorCode(error_code)); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | Result HardwareOpus::MapMemory(void* buffer, u64 buffer_size) { | 211 | Result HardwareOpus::MapMemory(void* buffer, u64 buffer_size) { |
| 212 | std::scoped_lock l{mutex}; | 212 | std::scoped_lock l{mutex}; |
| 213 | shared_memory.host_send_data[0] = (u64)buffer; | 213 | shared_memory.host_send_data[0] = (u64)buffer; |
| 214 | shared_memory.host_send_data[1] = buffer_size; | 214 | shared_memory.host_send_data[1] = buffer_size; |
| 215 | 215 | ||
| 216 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::MapMemory); | 216 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::MapMemory); |
| 217 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 217 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 218 | if (msg != ADSP::OpusDecoder::Message::MapMemoryOK) { | 218 | if (msg != ADSP::OpusDecoder::Message::MapMemoryOK) { |
| 219 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 219 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 220 | ADSP::OpusDecoder::Message::MapMemoryOK, msg); | 220 | ADSP::OpusDecoder::Message::MapMemoryOK, msg); |
| 221 | R_THROW(ResultInvalidOpusDSPReturnCode); | 221 | R_THROW(ResultInvalidOpusDSPReturnCode); |
| 222 | } | 222 | } |
| 223 | R_SUCCEED(); | 223 | R_SUCCEED(); |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | Result HardwareOpus::UnmapMemory(void* buffer, u64 buffer_size) { | 226 | Result HardwareOpus::UnmapMemory(void* buffer, u64 buffer_size) { |
| 227 | std::scoped_lock l{mutex}; | 227 | std::scoped_lock l{mutex}; |
| 228 | shared_memory.host_send_data[0] = (u64)buffer; | 228 | shared_memory.host_send_data[0] = (u64)buffer; |
| 229 | shared_memory.host_send_data[1] = buffer_size; | 229 | shared_memory.host_send_data[1] = buffer_size; |
| 230 | 230 | ||
| 231 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::UnmapMemory); | 231 | opus_decoder.Send(ADSP::Direction::DSP, ADSP::OpusDecoder::Message::UnmapMemory); |
| 232 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); | 232 | auto msg = opus_decoder.Receive(ADSP::Direction::Host); |
| 233 | if (msg != ADSP::OpusDecoder::Message::UnmapMemoryOK) { | 233 | if (msg != ADSP::OpusDecoder::Message::UnmapMemoryOK) { |
| 234 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", | 234 | LOG_ERROR(Service_Audio, "OpusDecoder returned an invalid message. Expected {}, got {}", |
| 235 | ADSP::OpusDecoder::Message::UnmapMemoryOK, msg); | 235 | ADSP::OpusDecoder::Message::UnmapMemoryOK, msg); |
| 236 | R_THROW(ResultInvalidOpusDSPReturnCode); | 236 | R_THROW(ResultInvalidOpusDSPReturnCode); |
| 237 | } | 237 | } |
| 238 | R_SUCCEED(); | 238 | R_SUCCEED(); |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | } // namespace AudioCore::OpusDecoder | 241 | } // namespace AudioCore::OpusDecoder |
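Every host-side call above repeats the same mailbox handshake: take the mutex, write arguments into shared_memory.host_send_data, Send a request toward the DSP, Receive the reply, and verify the acknowledging message before reading dsp_return_data. A hypothetical helper condensing that pattern, using only the Send/Receive/SharedMemory API visible in this diff (the helper itself is not part of the tree, and the caller must already hold the mutex):

    #include <cstddef>

    // Illustrative only. Pointer arguments must be pre-cast to u64 by the
    // caller, as the C-style casts above already do.
    template <typename... Args>
    bool SendAndAck(ADSP::OpusDecoder::OpusDecoder& decoder,
                    ADSP::OpusDecoder::SharedMemory& shared_memory,
                    ADSP::OpusDecoder::Message request,
                    ADSP::OpusDecoder::Message expected_ack, Args... args) {
        std::size_t index = 0;
        // Fold the arguments into host_send_data in order, as each caller does by hand.
        ((shared_memory.host_send_data[index++] = static_cast<u64>(args)), ...);
        decoder.Send(ADSP::Direction::DSP, request);
        return decoder.Receive(ADSP::Direction::Host) == expected_ack;
    }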
diff --git a/src/audio_core/opus/hardware_opus.h b/src/audio_core/opus/hardware_opus.h index 7013a6b40..b10184baa 100644 --- a/src/audio_core/opus/hardware_opus.h +++ b/src/audio_core/opus/hardware_opus.h | |||
| @@ -1,45 +1,45 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <mutex> | 6 | #include <mutex> |
| 7 | #include <opus.h> | 7 | #include <opus.h> |
| 8 | 8 | ||
| 9 | #include "audio_core/adsp/apps/opus/opus_decoder.h" | 9 | #include "audio_core/adsp/apps/opus/opus_decoder.h" |
| 10 | #include "audio_core/adsp/apps/opus/shared_memory.h" | 10 | #include "audio_core/adsp/apps/opus/shared_memory.h" |
| 11 | #include "audio_core/adsp/mailbox.h" | 11 | #include "audio_core/adsp/mailbox.h" |
| 12 | #include "core/hle/service/audio/errors.h" | 12 | #include "core/hle/service/audio/errors.h" |
| 13 | 13 | ||
| 14 | namespace AudioCore::OpusDecoder { | 14 | namespace AudioCore::OpusDecoder { |
| 15 | class HardwareOpus { | 15 | class HardwareOpus { |
| 16 | public: | 16 | public: |
| 17 | HardwareOpus(Core::System& system); | 17 | HardwareOpus(Core::System& system); |
| 18 | 18 | ||
| 19 | u64 GetWorkBufferSize(u32 channel); | 19 | u64 GetWorkBufferSize(u32 channel); |
| 20 | u64 GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count); | 20 | u64 GetWorkBufferSizeForMultiStream(u32 total_stream_count, u32 stereo_stream_count); |
| 21 | 21 | ||
| 22 | Result InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer, | 22 | Result InitializeDecodeObject(u32 sample_rate, u32 channel_count, void* buffer, |
| 23 | u64 buffer_size); | 23 | u64 buffer_size); |
| 24 | Result InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count, | 24 | Result InitializeMultiStreamDecodeObject(u32 sample_rate, u32 channel_count, |
| 25 | u32 total_stream_count, u32 stereo_stream_count, | 25 | u32 total_stream_count, u32 stereo_stream_count, |
| 26 | void* mappings, void* buffer, u64 buffer_size); | 26 | void* mappings, void* buffer, u64 buffer_size); |
| 27 | Result ShutdownDecodeObject(void* buffer, u64 buffer_size); | 27 | Result ShutdownDecodeObject(void* buffer, u64 buffer_size); |
| 28 | Result ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size); | 28 | Result ShutdownMultiStreamDecodeObject(void* buffer, u64 buffer_size); |
| 29 | Result DecodeInterleaved(u32& out_sample_count, void* output_data, u64 output_data_size, | 29 | Result DecodeInterleaved(u32& out_sample_count, void* output_data, u64 output_data_size, |
| 30 | u32 channel_count, void* input_data, u64 input_data_size, void* buffer, | 30 | u32 channel_count, void* input_data, u64 input_data_size, void* buffer, |
| 31 | u64& out_time_taken, bool reset); | 31 | u64& out_time_taken, bool reset); |
| 32 | Result DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data, | 32 | Result DecodeInterleavedForMultiStream(u32& out_sample_count, void* output_data, |
| 33 | u64 output_data_size, u32 channel_count, | 33 | u64 output_data_size, u32 channel_count, |
| 34 | void* input_data, u64 input_data_size, void* buffer, | 34 | void* input_data, u64 input_data_size, void* buffer, |
| 35 | u64& out_time_taken, bool reset); | 35 | u64& out_time_taken, bool reset); |
| 36 | Result MapMemory(void* buffer, u64 buffer_size); | 36 | Result MapMemory(void* buffer, u64 buffer_size); |
| 37 | Result UnmapMemory(void* buffer, u64 buffer_size); | 37 | Result UnmapMemory(void* buffer, u64 buffer_size); |
| 38 | 38 | ||
| 39 | private: | 39 | private: |
| 40 | Core::System& system; | 40 | Core::System& system; |
| 41 | std::mutex mutex; | 41 | std::mutex mutex; |
| 42 | ADSP::OpusDecoder::OpusDecoder& opus_decoder; | 42 | ADSP::OpusDecoder::OpusDecoder& opus_decoder; |
| 43 | ADSP::OpusDecoder::SharedMemory shared_memory; | 43 | ADSP::OpusDecoder::SharedMemory shared_memory; |
| 44 | }; | 44 | }; |
| 45 | } // namespace AudioCore::OpusDecoder | 45 | } // namespace AudioCore::OpusDecoder |
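A hedged usage sketch for the decode path declared here: the decoded sample count and the DSP-reported decode time (scaled by 1000 in the .cpp) come back through the two out-parameters, and any libopus error surfaces as a Result. All buffers and sizes below are placeholders:

    // Hypothetical buffers; in the service code these come from guest memory,
    // and work_buffer must be a previously mapped, correctly sized buffer.
    std::vector<u8> input(0x100);   // one encoded Opus packet (placeholder size)
    std::vector<u8> output(0x4000); // interleaved PCM destination (placeholder size)
    void* work_buffer = nullptr;    // placeholder; never null in real use

    u32 sample_count{};
    u64 time_taken{};
    const Result rc = hardware_opus.DecodeInterleaved(
        sample_count, output.data(), output.size(), 2, input.data(), input.size(),
        work_buffer, time_taken, /*reset=*/false);
    if (rc.IsSuccess()) {
        // sample_count samples per channel were written to output.
    }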
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp index 4b1690269..166dc3dce 100644 --- a/src/common/page_table.cpp +++ b/src/common/page_table.cpp | |||
| @@ -9,12 +9,12 @@ PageTable::PageTable() = default; | |||
| 9 | 9 | ||
| 10 | PageTable::~PageTable() noexcept = default; | 10 | PageTable::~PageTable() noexcept = default; |
| 11 | 11 | ||
| 12 | bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context, | 12 | bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, |
| 13 | u64 address) const { | 13 | Common::ProcessAddress address) const { |
| 14 | // Set up invalid defaults. | 14 | // Set up invalid defaults. |
| 15 | out_entry.phys_addr = 0; | 15 | out_entry->phys_addr = 0; |
| 16 | out_entry.block_size = page_size; | 16 | out_entry->block_size = page_size; |
| 17 | out_context.next_page = 0; | 17 | out_context->next_page = 0; |
| 18 | 18 | ||
| 19 | // Validate that we can read the actual entry. | 19 | // Validate that we can read the actual entry. |
| 20 | const auto page = address / page_size; | 20 | const auto page = address / page_size; |
| @@ -29,20 +29,20 @@ bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_ | |||
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | // Populate the results. | 31 | // Populate the results. |
| 32 | out_entry.phys_addr = phys_addr + address; | 32 | out_entry->phys_addr = phys_addr + GetInteger(address); |
| 33 | out_context.next_page = page + 1; | 33 | out_context->next_page = page + 1; |
| 34 | out_context.next_offset = address + page_size; | 34 | out_context->next_offset = GetInteger(address) + page_size; |
| 35 | 35 | ||
| 36 | return true; | 36 | return true; |
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const { | 39 | bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const { |
| 40 | // Set up invalid defaults. | 40 | // Set up invalid defaults. |
| 41 | out_entry.phys_addr = 0; | 41 | out_entry->phys_addr = 0; |
| 42 | out_entry.block_size = page_size; | 42 | out_entry->block_size = page_size; |
| 43 | 43 | ||
| 44 | // Validate that we can read the actual entry. | 44 | // Validate that we can read the actual entry. |
| 45 | const auto page = context.next_page; | 45 | const auto page = context->next_page; |
| 46 | if (page >= backing_addr.size()) { | 46 | if (page >= backing_addr.size()) { |
| 47 | return false; | 47 | return false; |
| 48 | } | 48 | } |
| @@ -54,9 +54,9 @@ bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& c | |||
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | // Populate the results. | 56 | // Populate the results. |
| 57 | out_entry.phys_addr = phys_addr + context.next_offset; | 57 | out_entry->phys_addr = phys_addr + context->next_offset; |
| 58 | context.next_page = page + 1; | 58 | context->next_page = page + 1; |
| 59 | context.next_offset += page_size; | 59 | context->next_offset += page_size; |
| 60 | 60 | ||
| 61 | return true; | 61 | return true; |
| 62 | } | 62 | } |
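With the signature change above, traversal callers pass pointers instead of references, matching the out-parameter convention used elsewhere in the kernel code. A minimal walk over a mapped range, assuming `pt` is a populated Common::PageTable and `base_address` a valid Common::ProcessAddress:

    Common::PageTable::TraversalEntry entry{};
    Common::PageTable::TraversalContext context{};

    if (pt.BeginTraversal(std::addressof(entry), std::addressof(context), base_address)) {
        do {
            // entry.phys_addr and entry.block_size describe the current page.
        } while (pt.ContinueTraversal(std::addressof(entry), std::addressof(context)));
    }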
diff --git a/src/common/page_table.h b/src/common/page_table.h index e653d52ad..5340f7d86 100644 --- a/src/common/page_table.h +++ b/src/common/page_table.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <atomic> | 6 | #include <atomic> |
| 7 | 7 | ||
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "common/typed_address.h" | ||
| 9 | #include "common/virtual_buffer.h" | 10 | #include "common/virtual_buffer.h" |
| 10 | 11 | ||
| 11 | namespace Common { | 12 | namespace Common { |
| @@ -100,9 +101,9 @@ struct PageTable { | |||
| 100 | PageTable(PageTable&&) noexcept = default; | 101 | PageTable(PageTable&&) noexcept = default; |
| 101 | PageTable& operator=(PageTable&&) noexcept = default; | 102 | PageTable& operator=(PageTable&&) noexcept = default; |
| 102 | 103 | ||
| 103 | bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context, | 104 | bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, |
| 104 | u64 address) const; | 105 | Common::ProcessAddress address) const; |
| 105 | bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const; | 106 | bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const; |
| 106 | 107 | ||
| 107 | /** | 108 | /** |
| 108 | * Resizes the page table to be able to accommodate enough pages within | 109 | * Resizes the page table to be able to accommodate enough pages within |
| @@ -117,6 +118,16 @@ struct PageTable { | |||
| 117 | return current_address_space_width_in_bits; | 118 | return current_address_space_width_in_bits; |
| 118 | } | 119 | } |
| 119 | 120 | ||
| 121 | bool GetPhysicalAddress(Common::PhysicalAddress* out_phys_addr, | ||
| 122 | Common::ProcessAddress virt_addr) const { | ||
| 123 | if (virt_addr > (1ULL << this->GetAddressSpaceBits())) { | ||
| 124 | return false; | ||
| 125 | } | ||
| 126 | |||
| 127 | *out_phys_addr = backing_addr[virt_addr / page_size] + GetInteger(virt_addr); | ||
| 128 | return true; | ||
| 129 | } | ||
| 130 | |||
| 120 | /** | 131 | /** |
| 121 | * Vector of memory pointers backing each page. An entry can only be non-null if the | 132 | * Vector of memory pointers backing each page. An entry can only be non-null if the |
| 122 | * corresponding attribute element is of type `Memory`. | 133 | * corresponding attribute element is of type `Memory`. |
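The new GetPhysicalAddress helper gives a bounds-checked virtual-to-physical lookup against the same backing_addr table the traversal uses. A short usage sketch, assuming `pt` is a Common::PageTable and `vaddr` a Common::ProcessAddress:

    Common::PhysicalAddress phys_addr{};
    if (pt.GetPhysicalAddress(std::addressof(phys_addr), vaddr)) {
        // phys_addr = backing_addr[vaddr / page_size] + vaddr; the call fails for
        // addresses beyond the configured address space width.
    }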
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index e4f499135..8be3bdd08 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt | |||
| @@ -271,8 +271,9 @@ add_library(core STATIC | |||
| 271 | hle/kernel/k_page_heap.h | 271 | hle/kernel/k_page_heap.h |
| 272 | hle/kernel/k_page_group.cpp | 272 | hle/kernel/k_page_group.cpp |
| 273 | hle/kernel/k_page_group.h | 273 | hle/kernel/k_page_group.h |
| 274 | hle/kernel/k_page_table.cpp | ||
| 275 | hle/kernel/k_page_table.h | 274 | hle/kernel/k_page_table.h |
| 275 | hle/kernel/k_page_table_base.cpp | ||
| 276 | hle/kernel/k_page_table_base.h | ||
| 276 | hle/kernel/k_page_table_manager.h | 277 | hle/kernel/k_page_table_manager.h |
| 277 | hle/kernel/k_page_table_slab_heap.h | 278 | hle/kernel/k_page_table_slab_heap.h |
| 278 | hle/kernel/k_port.cpp | 279 | hle/kernel/k_port.cpp |
| @@ -280,6 +281,7 @@ add_library(core STATIC | |||
| 280 | hle/kernel/k_priority_queue.h | 281 | hle/kernel/k_priority_queue.h |
| 281 | hle/kernel/k_process.cpp | 282 | hle/kernel/k_process.cpp |
| 282 | hle/kernel/k_process.h | 283 | hle/kernel/k_process.h |
| 284 | hle/kernel/k_process_page_table.h | ||
| 283 | hle/kernel/k_readable_event.cpp | 285 | hle/kernel/k_readable_event.cpp |
| 284 | hle/kernel/k_readable_event.h | 286 | hle/kernel/k_readable_event.h |
| 285 | hle/kernel/k_resource_limit.cpp | 287 | hle/kernel/k_resource_limit.cpp |
| @@ -330,8 +332,6 @@ add_library(core STATIC | |||
| 330 | hle/kernel/physical_core.cpp | 332 | hle/kernel/physical_core.cpp |
| 331 | hle/kernel/physical_core.h | 333 | hle/kernel/physical_core.h |
| 332 | hle/kernel/physical_memory.h | 334 | hle/kernel/physical_memory.h |
| 333 | hle/kernel/process_capability.cpp | ||
| 334 | hle/kernel/process_capability.h | ||
| 335 | hle/kernel/slab_helpers.h | 335 | hle/kernel/slab_helpers.h |
| 336 | hle/kernel/svc.cpp | 336 | hle/kernel/svc.cpp |
| 337 | hle/kernel/svc.h | 337 | hle/kernel/svc.h |
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp index 6f5f5156b..148dd3e39 100644 --- a/src/core/debugger/gdbstub.cpp +++ b/src/core/debugger/gdbstub.cpp | |||
| @@ -562,6 +562,120 @@ static std::string PaginateBuffer(std::string_view buffer, std::string_view requ | |||
| 562 | } | 562 | } |
| 563 | } | 563 | } |
| 564 | 564 | ||
| 565 | static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) { | ||
| 566 | Kernel::KMemoryInfo mem_info; | ||
| 567 | Kernel::Svc::MemoryInfo svc_mem_info; | ||
| 568 | Kernel::Svc::PageInfo page_info; | ||
| 569 | VAddr cur_addr{base}; | ||
| 570 | |||
| 571 | // Expect: r-x Code (.text) | ||
| 572 | R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr)); | ||
| 573 | svc_mem_info = mem_info.GetSvcMemoryInfo(); | ||
| 574 | cur_addr = svc_mem_info.base_address + svc_mem_info.size; | ||
| 575 | if (svc_mem_info.state != Kernel::Svc::MemoryState::Code || | ||
| 576 | svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) { | ||
| 577 | return cur_addr - 1; | ||
| 578 | } | ||
| 579 | |||
| 580 | // Expect: r-- Code (.rodata) | ||
| 581 | R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr)); | ||
| 582 | svc_mem_info = mem_info.GetSvcMemoryInfo(); | ||
| 583 | cur_addr = svc_mem_info.base_address + svc_mem_info.size; | ||
| 584 | if (svc_mem_info.state != Kernel::Svc::MemoryState::Code || | ||
| 585 | svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) { | ||
| 586 | return cur_addr - 1; | ||
| 587 | } | ||
| 588 | |||
| 589 | // Expect: rw- CodeData (.data) | ||
| 590 | R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr)); | ||
| 591 | svc_mem_info = mem_info.GetSvcMemoryInfo(); | ||
| 592 | cur_addr = svc_mem_info.base_address + svc_mem_info.size; | ||
| 593 | return cur_addr - 1; | ||
| 594 | } | ||
| 595 | |||
| 596 | static Loader::AppLoader::Modules FindModules(Core::System& system) { | ||
| 597 | Loader::AppLoader::Modules modules; | ||
| 598 | |||
| 599 | auto& page_table = system.ApplicationProcess()->GetPageTable(); | ||
| 600 | auto& memory = system.ApplicationMemory(); | ||
| 601 | VAddr cur_addr = 0; | ||
| 602 | |||
| 603 | // Look for executable sections in Code or AliasCode regions. | ||
| 604 | while (true) { | ||
| 605 | Kernel::KMemoryInfo mem_info{}; | ||
| 606 | Kernel::Svc::PageInfo page_info{}; | ||
| 607 | R_ASSERT( | ||
| 608 | page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr)); | ||
| 609 | auto svc_mem_info = mem_info.GetSvcMemoryInfo(); | ||
| 610 | |||
| 611 | if (svc_mem_info.permission == Kernel::Svc::MemoryPermission::ReadExecute && | ||
| 612 | (svc_mem_info.state == Kernel::Svc::MemoryState::Code || | ||
| 613 | svc_mem_info.state == Kernel::Svc::MemoryState::AliasCode)) { | ||
| 614 | // Try to read the module name from its path. | ||
| 615 | constexpr s32 PathLengthMax = 0x200; | ||
| 616 | struct { | ||
| 617 | u32 zero; | ||
| 618 | s32 path_length; | ||
| 619 | std::array<char, PathLengthMax> path; | ||
| 620 | } module_path; | ||
| 621 | |||
| 622 | if (memory.ReadBlock(svc_mem_info.base_address + svc_mem_info.size, &module_path, | ||
| 623 | sizeof(module_path))) { | ||
| 624 | if (module_path.zero == 0 && module_path.path_length > 0) { | ||
| 625 | // Truncate module name. | ||
| 626 | module_path.path[PathLengthMax - 1] = '\0'; | ||
| 627 | |||
| 628 | // Ignore leading directories. | ||
| 629 | char* path_pointer = module_path.path.data(); | ||
| 630 | |||
| 631 | for (s32 i = 0; i < std::min(PathLengthMax, module_path.path_length) && | ||
| 632 | module_path.path[i] != '\0'; | ||
| 633 | i++) { | ||
| 634 | if (module_path.path[i] == '/' || module_path.path[i] == '\\') { | ||
| 635 | path_pointer = module_path.path.data() + i + 1; | ||
| 636 | } | ||
| 637 | } | ||
| 638 | |||
| 639 | // Insert output. | ||
| 640 | modules.emplace(svc_mem_info.base_address, path_pointer); | ||
| 641 | } | ||
| 642 | } | ||
| 643 | } | ||
| 644 | |||
| 645 | // Check if we're done. | ||
| 646 | const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size; | ||
| 647 | if (next_address <= cur_addr) { | ||
| 648 | break; | ||
| 649 | } | ||
| 650 | |||
| 651 | cur_addr = next_address; | ||
| 652 | } | ||
| 653 | |||
| 654 | return modules; | ||
| 655 | } | ||
| 656 | |||
| 657 | static VAddr FindMainModuleEntrypoint(Core::System& system) { | ||
| 658 | Loader::AppLoader::Modules modules; | ||
| 659 | system.GetAppLoader().ReadNSOModules(modules); | ||
| 660 | |||
| 661 | // Do we have a module named main? | ||
| 662 | const auto main = std::find_if(modules.begin(), modules.end(), | ||
| 663 | [](const auto& key) { return key.second == "main"; }); | ||
| 664 | |||
| 665 | if (main != modules.end()) { | ||
| 666 | return main->first; | ||
| 667 | } | ||
| 668 | |||
| 669 | // Do we have any loaded executable sections? | ||
| 670 | modules = FindModules(system); | ||
| 671 | if (!modules.empty()) { | ||
| 672 | return modules.begin()->first; | ||
| 673 | } | ||
| 674 | |||
| 675 | // As a last resort, use the start of the code region. | ||
| 676 | return GetInteger(system.ApplicationProcess()->GetPageTable().GetCodeRegionStart()); | ||
| 677 | } | ||
| 678 | |||
| 565 | void GDBStub::HandleQuery(std::string_view command) { | 679 | void GDBStub::HandleQuery(std::string_view command) { |
| 566 | if (command.starts_with("TStatus")) { | 680 | if (command.starts_with("TStatus")) { |
| 567 | // no tracepoint support | 681 | // no tracepoint support |
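The leading-directory trim inside FindModules amounts to a last-separator search over the NUL-truncated path. An equivalent standalone sketch (illustrative only; the real code operates on the fixed-size array read from guest memory):

    #include <string_view>

    // Keep everything after the last '/' or '\\'.
    constexpr std::string_view TrimLeadingDirectories(std::string_view path) {
        const auto pos = path.find_last_of("/\\");
        return pos == std::string_view::npos ? path : path.substr(pos + 1);
    }

    static_assert(TrimLeadingDirectories("nso/subsdk0") == "subsdk0");
    static_assert(TrimLeadingDirectories("main") == "main");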
| @@ -573,21 +687,10 @@ void GDBStub::HandleQuery(std::string_view command) { | |||
| 573 | const auto target_xml{arch->GetTargetXML()}; | 687 | const auto target_xml{arch->GetTargetXML()}; |
| 574 | SendReply(PaginateBuffer(target_xml, command.substr(30))); | 688 | SendReply(PaginateBuffer(target_xml, command.substr(30))); |
| 575 | } else if (command.starts_with("Offsets")) { | 689 | } else if (command.starts_with("Offsets")) { |
| 576 | Loader::AppLoader::Modules modules; | 690 | const auto main_offset = FindMainModuleEntrypoint(system); |
| 577 | system.GetAppLoader().ReadNSOModules(modules); | 691 | SendReply(fmt::format("TextSeg={:x}", main_offset)); |
| 578 | |||
| 579 | const auto main = std::find_if(modules.begin(), modules.end(), | ||
| 580 | [](const auto& key) { return key.second == "main"; }); | ||
| 581 | if (main != modules.end()) { | ||
| 582 | SendReply(fmt::format("TextSeg={:x}", main->first)); | ||
| 583 | } else { | ||
| 584 | SendReply(fmt::format( | ||
| 585 | "TextSeg={:x}", | ||
| 586 | GetInteger(system.ApplicationProcess()->GetPageTable().GetCodeRegionStart()))); | ||
| 587 | } | ||
| 588 | } else if (command.starts_with("Xfer:libraries:read::")) { | 692 | } else if (command.starts_with("Xfer:libraries:read::")) { |
| 589 | Loader::AppLoader::Modules modules; | 693 | auto modules = FindModules(system); |
| 590 | system.GetAppLoader().ReadNSOModules(modules); | ||
| 591 | 694 | ||
| 592 | std::string buffer; | 695 | std::string buffer; |
| 593 | buffer += R"(<?xml version="1.0"?>)"; | 696 | buffer += R"(<?xml version="1.0"?>)"; |
| @@ -727,32 +830,6 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory | |||
| 727 | } | 830 | } |
| 728 | } | 831 | } |
| 729 | 832 | ||
| 730 | static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) { | ||
| 731 | Kernel::Svc::MemoryInfo mem_info; | ||
| 732 | VAddr cur_addr{base}; | ||
| 733 | |||
| 734 | // Expect: r-x Code (.text) | ||
| 735 | mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); | ||
| 736 | cur_addr = mem_info.base_address + mem_info.size; | ||
| 737 | if (mem_info.state != Kernel::Svc::MemoryState::Code || | ||
| 738 | mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) { | ||
| 739 | return cur_addr - 1; | ||
| 740 | } | ||
| 741 | |||
| 742 | // Expect: r-- Code (.rodata) | ||
| 743 | mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); | ||
| 744 | cur_addr = mem_info.base_address + mem_info.size; | ||
| 745 | if (mem_info.state != Kernel::Svc::MemoryState::Code || | ||
| 746 | mem_info.permission != Kernel::Svc::MemoryPermission::Read) { | ||
| 747 | return cur_addr - 1; | ||
| 748 | } | ||
| 749 | |||
| 750 | // Expect: rw- CodeData (.data) | ||
| 751 | mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); | ||
| 752 | cur_addr = mem_info.base_address + mem_info.size; | ||
| 753 | return cur_addr - 1; | ||
| 754 | } | ||
| 755 | |||
| 756 | void GDBStub::HandleRcmd(const std::vector<u8>& command) { | 833 | void GDBStub::HandleRcmd(const std::vector<u8>& command) { |
| 757 | std::string_view command_str{reinterpret_cast<const char*>(&command[0]), command.size()}; | 834 | std::string_view command_str{reinterpret_cast<const char*>(&command[0]), command.size()}; |
| 758 | std::string reply; | 835 | std::string reply; |
| @@ -767,7 +844,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) { | |||
| 767 | 844 | ||
| 768 | if (command_str == "get fastmem") { | 845 | if (command_str == "get fastmem") { |
| 769 | if (Settings::IsFastmemEnabled()) { | 846 | if (Settings::IsFastmemEnabled()) { |
| 770 | const auto& impl = page_table.PageTableImpl(); | 847 | const auto& impl = page_table.GetImpl(); |
| 771 | const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena); | 848 | const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena); |
| 772 | const auto region_bits = impl.current_address_space_width_in_bits; | 849 | const auto region_bits = impl.current_address_space_width_in_bits; |
| 773 | const auto region_size = 1ULL << region_bits; | 850 | const auto region_size = 1ULL << region_bits; |
| @@ -779,26 +856,27 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) { | |||
| 779 | reply = "Fastmem is not enabled.\n"; | 856 | reply = "Fastmem is not enabled.\n"; |
| 780 | } | 857 | } |
| 781 | } else if (command_str == "get info") { | 858 | } else if (command_str == "get info") { |
| 782 | Loader::AppLoader::Modules modules; | 859 | auto modules = FindModules(system); |
| 783 | system.GetAppLoader().ReadNSOModules(modules); | ||
| 784 | 860 | ||
| 785 | reply = fmt::format("Process: {:#x} ({})\n" | 861 | reply = fmt::format("Process: {:#x} ({})\n" |
| 786 | "Program Id: {:#018x}\n", | 862 | "Program Id: {:#018x}\n", |
| 787 | process->GetProcessId(), process->GetName(), process->GetProgramId()); | 863 | process->GetProcessId(), process->GetName(), process->GetProgramId()); |
| 788 | reply += fmt::format("Layout:\n" | 864 | reply += fmt::format( |
| 789 | " Alias: {:#012x} - {:#012x}\n" | 865 | "Layout:\n" |
| 790 | " Heap: {:#012x} - {:#012x}\n" | 866 | " Alias: {:#012x} - {:#012x}\n" |
| 791 | " Aslr: {:#012x} - {:#012x}\n" | 867 | " Heap: {:#012x} - {:#012x}\n" |
| 792 | " Stack: {:#012x} - {:#012x}\n" | 868 | " Aslr: {:#012x} - {:#012x}\n" |
| 793 | "Modules:\n", | 869 | " Stack: {:#012x} - {:#012x}\n" |
| 794 | GetInteger(page_table.GetAliasRegionStart()), | 870 | "Modules:\n", |
| 795 | GetInteger(page_table.GetAliasRegionEnd()), | 871 | GetInteger(page_table.GetAliasRegionStart()), |
| 796 | GetInteger(page_table.GetHeapRegionStart()), | 872 | GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1, |
| 797 | GetInteger(page_table.GetHeapRegionEnd()), | 873 | GetInteger(page_table.GetHeapRegionStart()), |
| 798 | GetInteger(page_table.GetAliasCodeRegionStart()), | 874 | GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1, |
| 799 | GetInteger(page_table.GetAliasCodeRegionEnd()), | 875 | GetInteger(page_table.GetAliasCodeRegionStart()), |
| 800 | GetInteger(page_table.GetStackRegionStart()), | 876 | GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() - |
| 801 | GetInteger(page_table.GetStackRegionEnd())); | 877 | 1, |
| 878 | GetInteger(page_table.GetStackRegionStart()), | ||
| 879 | GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1); | ||
| 802 | 880 | ||
| 803 | for (const auto& [vaddr, name] : modules) { | 881 | for (const auto& [vaddr, name] : modules) { |
| 804 | reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr, | 882 | reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr, |
| @@ -811,27 +889,34 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) { | |||
| 811 | while (true) { | 889 | while (true) { |
| 812 | using MemoryAttribute = Kernel::Svc::MemoryAttribute; | 890 | using MemoryAttribute = Kernel::Svc::MemoryAttribute; |
| 813 | 891 | ||
| 814 | auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo(); | 892 | Kernel::KMemoryInfo mem_info{}; |
| 815 | 893 | Kernel::Svc::PageInfo page_info{}; | |
| 816 | if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible || | 894 | R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), |
| 817 | mem_info.base_address + mem_info.size - 1 != std::numeric_limits<u64>::max()) { | 895 | cur_addr)); |
| 818 | const char* state = GetMemoryStateName(mem_info.state); | 896 | auto svc_mem_info = mem_info.GetSvcMemoryInfo(); |
| 819 | const char* perm = GetMemoryPermissionString(mem_info); | 897 | |
| 820 | 898 | if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible || | |
| 821 | const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-'; | 899 | svc_mem_info.base_address + svc_mem_info.size - 1 != |
| 822 | const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-'; | 900 | std::numeric_limits<u64>::max()) { |
| 823 | const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-'; | 901 | const char* state = GetMemoryStateName(svc_mem_info.state); |
| 824 | const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-'; | 902 | const char* perm = GetMemoryPermissionString(svc_mem_info); |
| 903 | |||
| 904 | const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-'; | ||
| 905 | const char i = | ||
| 906 | True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-'; | ||
| 907 | const char d = | ||
| 908 | True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-'; | ||
| 909 | const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-'; | ||
| 825 | const char p = | 910 | const char p = |
| 826 | True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-'; | 911 | True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-'; |
| 827 | 912 | ||
| 828 | reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", | 913 | reply += fmt::format( |
| 829 | mem_info.base_address, | 914 | " {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address, |
| 830 | mem_info.base_address + mem_info.size - 1, perm, state, l, i, | 915 | svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p, |
| 831 | d, u, p, mem_info.ipc_count, mem_info.device_count); | 916 | svc_mem_info.ipc_count, svc_mem_info.device_count); |
| 832 | } | 917 | } |
| 833 | 918 | ||
| 834 | const uintptr_t next_address = mem_info.base_address + mem_info.size; | 919 | const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size; |
| 835 | if (next_address <= cur_addr) { | 920 | if (next_address <= cur_addr) { |
| 836 | break; | 921 | break; |
| 837 | } | 922 | } |
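Since the region bounds are now derived as start + size - 1 rather than via the removed GetXxxRegionEnd() accessors, the inclusive-range lines printed by "get info" reduce to one formatting pattern. A small standalone sketch of it (fmt is already a yuzu dependency; the function name is illustrative):

    #include <cstdint>
    #include <string>
    #include <string_view>
    #include <fmt/format.h>

    // One inclusive-range line in the style of the "get info" reply.
    std::string FormatRegion(std::string_view name, std::uint64_t start, std::uint64_t size) {
        return fmt::format(" {}: {:#012x} - {:#012x}\n", name, start, start + size - 1);
    }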
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp index 8e2894449..b08a71446 100644 --- a/src/core/hid/emulated_controller.cpp +++ b/src/core/hid/emulated_controller.cpp | |||
| @@ -96,18 +96,7 @@ void EmulatedController::ReloadFromSettings() { | |||
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | controller.color_values = {}; | 98 | controller.color_values = {}; |
| 99 | controller.colors_state.fullkey = { | 99 | ReloadColorsFromSettings(); |
| 100 | .body = GetNpadColor(player.body_color_left), | ||
| 101 | .button = GetNpadColor(player.button_color_left), | ||
| 102 | }; | ||
| 103 | controller.colors_state.left = { | ||
| 104 | .body = GetNpadColor(player.body_color_left), | ||
| 105 | .button = GetNpadColor(player.button_color_left), | ||
| 106 | }; | ||
| 107 | controller.colors_state.right = { | ||
| 108 | .body = GetNpadColor(player.body_color_right), | ||
| 109 | .button = GetNpadColor(player.button_color_right), | ||
| 110 | }; | ||
| 111 | 100 | ||
| 112 | ring_params[0] = Common::ParamPackage(Settings::values.ringcon_analogs); | 101 | ring_params[0] = Common::ParamPackage(Settings::values.ringcon_analogs); |
| 113 | 102 | ||
| @@ -128,6 +117,30 @@ void EmulatedController::ReloadFromSettings() { | |||
| 128 | ReloadInput(); | 117 | ReloadInput(); |
| 129 | } | 118 | } |
| 130 | 119 | ||
| 120 | void EmulatedController::ReloadColorsFromSettings() { | ||
| 121 | const auto player_index = NpadIdTypeToIndex(npad_id_type); | ||
| 122 | const auto& player = Settings::values.players.GetValue()[player_index]; | ||
| 123 | |||
| 124 | // Avoid updating the colors if they are already overridden by a physical controller | ||
| 125 | if (controller.color_values[LeftIndex].body != 0 && | ||
| 126 | controller.color_values[RightIndex].body != 0) { | ||
| 127 | return; | ||
| 128 | } | ||
| 129 | |||
| 130 | controller.colors_state.fullkey = { | ||
| 131 | .body = GetNpadColor(player.body_color_left), | ||
| 132 | .button = GetNpadColor(player.button_color_left), | ||
| 133 | }; | ||
| 134 | controller.colors_state.left = { | ||
| 135 | .body = GetNpadColor(player.body_color_left), | ||
| 136 | .button = GetNpadColor(player.button_color_left), | ||
| 137 | }; | ||
| 138 | controller.colors_state.right = { | ||
| 139 | .body = GetNpadColor(player.body_color_right), | ||
| 140 | .button = GetNpadColor(player.button_color_right), | ||
| 141 | }; | ||
| 142 | } | ||
| 143 | |||
| 131 | void EmulatedController::LoadDevices() { | 144 | void EmulatedController::LoadDevices() { |
| 132 | // TODO(german77): Use more buttons to detect the correct device | 145 | // TODO(german77): Use more buttons to detect the correct device |
| 133 | const auto left_joycon = button_params[Settings::NativeButton::DRight]; | 146 | const auto left_joycon = button_params[Settings::NativeButton::DRight]; |
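ReloadColorsFromSettings pulls the color assignment out of ReloadFromSettings and returns early when a connected physical controller has already reported both body colors. A condensed sketch of that guard, with hypothetical stand-in types:

    #include <array>
    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-ins for the controller color state.
    struct NpadColor {
        std::uint32_t body{};
        std::uint32_t button{};
    };

    constexpr std::size_t LeftIndex = 0;
    constexpr std::size_t RightIndex = 1;

    void ReloadColors(std::array<NpadColor, 2>& colors, NpadColor left_setting,
                      NpadColor right_setting) {
        // Avoid clobbering colors a physical controller already reported:
        // a nonzero body color on both sides means the device supplied its own.
        if (colors[LeftIndex].body != 0 && colors[RightIndex].body != 0) {
            return;
        }
        colors[LeftIndex] = left_setting;
        colors[RightIndex] = right_setting;
    }

    int main() {
        std::array<NpadColor, 2> colors{};
        ReloadColors(colors, {0x323232, 0xFFFFFF}, {0x323232, 0xFFFFFF});
        std::cout << std::hex << colors[LeftIndex].body << '\n'; // prints 323232
    }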
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h index d4500583e..ea18c2343 100644 --- a/src/core/hid/emulated_controller.h +++ b/src/core/hid/emulated_controller.h | |||
| @@ -253,6 +253,9 @@ public: | |||
| 253 | /// Overrides current mapped devices with the stored configuration and reloads all input devices | 253 | /// Overrides current mapped devices with the stored configuration and reloads all input devices |
| 254 | void ReloadFromSettings(); | 254 | void ReloadFromSettings(); |
| 255 | 255 | ||
| 256 | /// Updates the current colors with the ones stored in the configuration | ||
| 257 | void ReloadColorsFromSettings(); | ||
| 258 | |||
| 256 | /// Saves the current mapped configuration | 259 | /// Saves the current mapped configuration |
| 257 | void SaveCurrentConfig(); | 260 | void SaveCurrentConfig(); |
| 258 | 261 | ||
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp index 59364efa1..37fa39a73 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | |||
| @@ -222,7 +222,7 @@ Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* | |||
| 222 | }; | 222 | }; |
| 223 | 223 | ||
| 224 | // We succeeded. | 224 | // We succeeded. |
| 225 | *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr); | 225 | *out = KPageTable::GetHeapVirtualAddress(kernel, paddr); |
| 226 | R_SUCCEED(); | 226 | R_SUCCEED(); |
| 227 | } | 227 | } |
| 228 | 228 | ||
| @@ -238,8 +238,17 @@ void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress addres | |||
| 238 | ASSERT(Common::IsAligned(size, alignment)); | 238 | ASSERT(Common::IsAligned(size, alignment)); |
| 239 | 239 | ||
| 240 | // Close the secure region's pages. | 240 | // Close the secure region's pages. |
| 241 | kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address), | 241 | kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address), |
| 242 | size / PageSize); | 242 | size / PageSize); |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | // Insecure Memory. | ||
| 246 | KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) { | ||
| 247 | return kernel.GetSystemResourceLimit(); | ||
| 248 | } | ||
| 249 | |||
| 250 | u32 KSystemControl::GetInsecureMemoryPool() { | ||
| 251 | return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure); | ||
| 252 | } | ||
| 253 | |||
| 245 | } // namespace Kernel::Board::Nintendo::Nx | 254 | } // namespace Kernel::Board::Nintendo::Nx |
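The two new board hooks point insecure-memory accounting at the system resource limit and the SystemNonSecure pool. A small sketch of the pool-id encoding; the enum values here are illustrative, and the call site is not part of this diff:

    #include <cstdint>
    #include <iostream>

    // Mirrors the shape of KMemoryManager::Pool; the values are illustrative,
    // not the kernel's.
    enum class Pool : std::uint32_t { Application, Applet, System, SystemNonSecure };

    // Board hook sketch: insecure memory draws from the non-secure system pool.
    std::uint32_t GetInsecureMemoryPool() {
        return static_cast<std::uint32_t>(Pool::SystemNonSecure);
    }

    int main() {
        // A hypothetical caller would combine this pool id with an allocation
        // direction before asking the memory manager for pages.
        std::cout << "insecure pool id: " << GetInsecureMemoryPool() << '\n';
    }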
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h index ff1feec70..60c5e58b7 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h | |||
| @@ -8,7 +8,8 @@ | |||
| 8 | 8 | ||
| 9 | namespace Kernel { | 9 | namespace Kernel { |
| 10 | class KernelCore; | 10 | class KernelCore; |
| 11 | } | 11 | class KResourceLimit; |
| 12 | } // namespace Kernel | ||
| 12 | 13 | ||
| 13 | namespace Kernel::Board::Nintendo::Nx { | 14 | namespace Kernel::Board::Nintendo::Nx { |
| 14 | 15 | ||
| @@ -40,6 +41,10 @@ public: | |||
| 40 | u32 pool); | 41 | u32 pool); |
| 41 | static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size, | 42 | static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size, |
| 42 | u32 pool); | 43 | u32 pool); |
| 44 | |||
| 45 | // Insecure Memory. | ||
| 46 | static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel); | ||
| 47 | static u32 GetInsecureMemoryPool(); | ||
| 43 | }; | 48 | }; |
| 44 | 49 | ||
| 45 | } // namespace Kernel::Board::Nintendo::Nx | 50 | } // namespace Kernel::Board::Nintendo::Nx |
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp index e7da7a21d..274fee493 100644 --- a/src/core/hle/kernel/k_capabilities.cpp +++ b/src/core/hle/kernel/k_capabilities.cpp | |||
| @@ -4,14 +4,16 @@ | |||
| 4 | #include "core/hardware_properties.h" | 4 | #include "core/hardware_properties.h" |
| 5 | #include "core/hle/kernel/k_capabilities.h" | 5 | #include "core/hle/kernel/k_capabilities.h" |
| 6 | #include "core/hle/kernel/k_memory_layout.h" | 6 | #include "core/hle/kernel/k_memory_layout.h" |
| 7 | #include "core/hle/kernel/k_page_table.h" | 7 | #include "core/hle/kernel/k_process_page_table.h" |
| 8 | #include "core/hle/kernel/k_trace.h" | ||
| 8 | #include "core/hle/kernel/kernel.h" | 9 | #include "core/hle/kernel/kernel.h" |
| 9 | #include "core/hle/kernel/svc_results.h" | 10 | #include "core/hle/kernel/svc_results.h" |
| 10 | #include "core/hle/kernel/svc_version.h" | 11 | #include "core/hle/kernel/svc_version.h" |
| 11 | 12 | ||
| 12 | namespace Kernel { | 13 | namespace Kernel { |
| 13 | 14 | ||
| 14 | Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) { | 15 | Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, |
| 16 | KProcessPageTable* page_table) { | ||
| 15 | // We're initializing an initial process. | 17 | // We're initializing an initial process. |
| 16 | m_svc_access_flags.reset(); | 18 | m_svc_access_flags.reset(); |
| 17 | m_irq_access_flags.reset(); | 19 | m_irq_access_flags.reset(); |
| @@ -41,7 +43,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl | |||
| 41 | R_RETURN(this->SetCapabilities(kern_caps, page_table)); | 43 | R_RETURN(this->SetCapabilities(kern_caps, page_table)); |
| 42 | } | 44 | } |
| 43 | 45 | ||
| 44 | Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) { | 46 | Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, |
| 47 | KProcessPageTable* page_table) { | ||
| 45 | // We're initializing a user process. | 48 | // We're initializing a user process. |
| 46 | m_svc_access_flags.reset(); | 49 | m_svc_access_flags.reset(); |
| 47 | m_irq_access_flags.reset(); | 50 | m_irq_access_flags.reset(); |
| @@ -121,7 +124,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) { | |||
| 121 | R_SUCCEED(); | 124 | R_SUCCEED(); |
| 122 | } | 125 | } |
| 123 | 126 | ||
| 124 | Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) { | 127 | Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) { |
| 125 | const auto range_pack = MapRange{cap}; | 128 | const auto range_pack = MapRange{cap}; |
| 126 | const auto size_pack = MapRangeSize{size_cap}; | 129 | const auto size_pack = MapRangeSize{size_cap}; |
| 127 | 130 | ||
| @@ -142,16 +145,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p | |||
| 142 | ? KMemoryPermission::UserRead | 145 | ? KMemoryPermission::UserRead |
| 143 | : KMemoryPermission::UserReadWrite; | 146 | : KMemoryPermission::UserReadWrite; |
| 144 | if (MapRangeSize{size_cap}.normal) { | 147 | if (MapRangeSize{size_cap}.normal) { |
| 145 | // R_RETURN(page_table->MapStatic(phys_addr, size, perm)); | 148 | R_RETURN(page_table->MapStatic(phys_addr, size, perm)); |
| 146 | } else { | 149 | } else { |
| 147 | // R_RETURN(page_table->MapIo(phys_addr, size, perm)); | 150 | R_RETURN(page_table->MapIo(phys_addr, size, perm)); |
| 148 | } | 151 | } |
| 149 | |||
| 150 | UNIMPLEMENTED(); | ||
| 151 | R_SUCCEED(); | ||
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) { | 154 | Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) { |
| 155 | // Get/validate address/size | 155 | // Get/validate address/size |
| 156 | const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize; | 156 | const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize; |
| 157 | const size_t num_pages = 1; | 157 | const size_t num_pages = 1; |
| @@ -160,10 +160,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) { | |||
| 160 | R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress); | 160 | R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress); |
| 161 | 161 | ||
| 162 | // Do the mapping. | 162 | // Do the mapping. |
| 163 | // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite)); | 163 | R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite)); |
| 164 | |||
| 165 | UNIMPLEMENTED(); | ||
| 166 | R_SUCCEED(); | ||
| 167 | } | 164 | } |
| 168 | 165 | ||
| 169 | template <typename F> | 166 | template <typename F> |
| @@ -200,13 +197,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) { | |||
| 200 | R_SUCCEED(); | 197 | R_SUCCEED(); |
| 201 | } | 198 | } |
| 202 | 199 | ||
| 203 | Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) { | 200 | Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) { |
| 204 | // Map each region into the process's page table. | 201 | // Map each region into the process's page table. |
| 205 | return ProcessMapRegionCapability( | 202 | return ProcessMapRegionCapability( |
| 206 | cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result { | 203 | cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result { |
| 207 | // R_RETURN(page_table->MapRegion(region_type, perm)); | 204 | R_RETURN(page_table->MapRegion(region_type, perm)); |
| 208 | UNIMPLEMENTED(); | ||
| 209 | R_SUCCEED(); | ||
| 210 | }); | 205 | }); |
| 211 | } | 206 | } |
| 212 | 207 | ||
| @@ -280,7 +275,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) { | |||
| 280 | } | 275 | } |
| 281 | 276 | ||
| 282 | Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc, | 277 | Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc, |
| 283 | KPageTable* page_table) { | 278 | KProcessPageTable* page_table) { |
| 284 | // Validate this is a capability we can act on. | 279 | // Validate this is a capability we can act on. |
| 285 | const auto type = GetCapabilityType(cap); | 280 | const auto type = GetCapabilityType(cap); |
| 286 | R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument); | 281 | R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument); |
| @@ -318,7 +313,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc, | |||
| 318 | } | 313 | } |
| 319 | } | 314 | } |
| 320 | 315 | ||
| 321 | Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) { | 316 | Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) { |
| 322 | u32 set_flags = 0, set_svc = 0; | 317 | u32 set_flags = 0, set_svc = 0; |
| 323 | 318 | ||
| 324 | for (size_t i = 0; i < caps.size(); i++) { | 319 | for (size_t i = 0; i < caps.size(); i++) { |
| @@ -335,6 +330,8 @@ Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* pag | |||
| 335 | 330 | ||
| 336 | // Map the range. | 331 | // Map the range. |
| 337 | R_TRY(this->MapRange_(cap, size_cap, page_table)); | 332 | R_TRY(this->MapRange_(cap, size_cap, page_table)); |
| 333 | } else if (GetCapabilityType(cap) == CapabilityType::MapRegion && !IsKTraceEnabled) { | ||
| 334 | continue; | ||
| 338 | } else { | 335 | } else { |
| 339 | R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table)); | 336 | R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table)); |
| 340 | } | 337 | } |
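MapIoPage_ recovers a physical address from the 32-bit capability word by treating the stored value as a page number and scaling it by PageSize. A sketch of that unpacking; the bit layout below is an assumption standing in for the real MapIoPage bitfield:

    #include <cassert>
    #include <cstdint>

    constexpr std::uint64_t PageSize = 0x1000;

    // Hypothetical descriptor layout: low bits tag the capability type, the rest
    // carry the physical address in page units (the real field widths differ).
    constexpr std::uint32_t AddressShift = 8;

    std::uint64_t DecodeIoPageAddress(std::uint32_t cap) {
        const std::uint64_t page_number = cap >> AddressShift;
        return page_number * PageSize; // back to a byte address
    }

    int main() {
        const std::uint32_t cap = (0x1234u << AddressShift) | 0x2Fu;
        assert(DecodeIoPageAddress(cap) == 0x1234u * PageSize);
    }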
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h index ebd4eedb1..013d952ad 100644 --- a/src/core/hle/kernel/k_capabilities.h +++ b/src/core/hle/kernel/k_capabilities.h | |||
| @@ -15,15 +15,15 @@ | |||
| 15 | 15 | ||
| 16 | namespace Kernel { | 16 | namespace Kernel { |
| 17 | 17 | ||
| 18 | class KPageTable; | 18 | class KProcessPageTable; |
| 19 | class KernelCore; | 19 | class KernelCore; |
| 20 | 20 | ||
| 21 | class KCapabilities { | 21 | class KCapabilities { |
| 22 | public: | 22 | public: |
| 23 | constexpr explicit KCapabilities() = default; | 23 | constexpr explicit KCapabilities() = default; |
| 24 | 24 | ||
| 25 | Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table); | 25 | Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table); |
| 26 | Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table); | 26 | Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table); |
| 27 | 27 | ||
| 28 | static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps); | 28 | static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps); |
| 29 | 29 | ||
| @@ -264,9 +264,9 @@ private: | |||
| 264 | 264 | ||
| 265 | Result SetCorePriorityCapability(const u32 cap); | 265 | Result SetCorePriorityCapability(const u32 cap); |
| 266 | Result SetSyscallMaskCapability(const u32 cap, u32& set_svc); | 266 | Result SetSyscallMaskCapability(const u32 cap, u32& set_svc); |
| 267 | Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table); | 267 | Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table); |
| 268 | Result MapIoPage_(const u32 cap, KPageTable* page_table); | 268 | Result MapIoPage_(const u32 cap, KProcessPageTable* page_table); |
| 269 | Result MapRegion_(const u32 cap, KPageTable* page_table); | 269 | Result MapRegion_(const u32 cap, KProcessPageTable* page_table); |
| 270 | Result SetInterruptPairCapability(const u32 cap); | 270 | Result SetInterruptPairCapability(const u32 cap); |
| 271 | Result SetProgramTypeCapability(const u32 cap); | 271 | Result SetProgramTypeCapability(const u32 cap); |
| 272 | Result SetKernelVersionCapability(const u32 cap); | 272 | Result SetKernelVersionCapability(const u32 cap); |
| @@ -277,8 +277,9 @@ private: | |||
| 277 | static Result ProcessMapRegionCapability(const u32 cap, F f); | 277 | static Result ProcessMapRegionCapability(const u32 cap, F f); |
| 278 | static Result CheckMapRegion(KernelCore& kernel, const u32 cap); | 278 | static Result CheckMapRegion(KernelCore& kernel, const u32 cap); |
| 279 | 279 | ||
| 280 | Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table); | 280 | Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, |
| 281 | Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table); | 281 | KProcessPageTable* page_table); |
| 282 | Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table); | ||
| 282 | 283 | ||
| 283 | private: | 284 | private: |
| 284 | Svc::SvcAccessFlagSet m_svc_access_flags{}; | 285 | Svc::SvcAccessFlagSet m_svc_access_flags{}; |
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp index f48896715..f0703f795 100644 --- a/src/core/hle/kernel/k_device_address_space.cpp +++ b/src/core/hle/kernel/k_device_address_space.cpp | |||
| @@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) { | |||
| 54 | R_SUCCEED(); | 54 | R_SUCCEED(); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address, | 57 | Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address, |
| 58 | size_t size, u64 device_address, u32 option, bool is_aligned) { | 58 | size_t size, u64 device_address, u32 option, bool is_aligned) { |
| 59 | // Check that the address falls within the space. | 59 | // Check that the address falls within the space. |
| 60 | R_UNLESS((m_space_address <= device_address && | 60 | R_UNLESS((m_space_address <= device_address && |
| @@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_ | |||
| 113 | R_SUCCEED(); | 113 | R_SUCCEED(); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address, | 116 | Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address, |
| 117 | size_t size, u64 device_address) { | 117 | size_t size, u64 device_address) { |
| 118 | // Check that the address falls within the space. | 118 | // Check that the address falls within the space. |
| 119 | R_UNLESS((m_space_address <= device_address && | 119 | R_UNLESS((m_space_address <= device_address && |
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h index 18556e3cc..ff0ec8152 100644 --- a/src/core/hle/kernel/k_device_address_space.h +++ b/src/core/hle/kernel/k_device_address_space.h | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <string> | 6 | #include <string> |
| 7 | 7 | ||
| 8 | #include "core/hle/kernel/k_page_table.h" | 8 | #include "core/hle/kernel/k_process_page_table.h" |
| 9 | #include "core/hle/kernel/k_typed_address.h" | 9 | #include "core/hle/kernel/k_typed_address.h" |
| 10 | #include "core/hle/kernel/slab_helpers.h" | 10 | #include "core/hle/kernel/slab_helpers.h" |
| 11 | #include "core/hle/result.h" | 11 | #include "core/hle/result.h" |
| @@ -31,23 +31,23 @@ public: | |||
| 31 | Result Attach(Svc::DeviceName device_name); | 31 | Result Attach(Svc::DeviceName device_name); |
| 32 | Result Detach(Svc::DeviceName device_name); | 32 | Result Detach(Svc::DeviceName device_name); |
| 33 | 33 | ||
| 34 | Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size, | 34 | Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, |
| 35 | u64 device_address, u32 option) { | 35 | u64 device_address, u32 option) { |
| 36 | R_RETURN(this->Map(page_table, process_address, size, device_address, option, false)); | 36 | R_RETURN(this->Map(page_table, process_address, size, device_address, option, false)); |
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size, | 39 | Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, |
| 40 | u64 device_address, u32 option) { | 40 | u64 device_address, u32 option) { |
| 41 | R_RETURN(this->Map(page_table, process_address, size, device_address, option, true)); | 41 | R_RETURN(this->Map(page_table, process_address, size, device_address, option, true)); |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size, | 44 | Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, |
| 45 | u64 device_address); | 45 | u64 device_address); |
| 46 | 46 | ||
| 47 | static void Initialize(); | 47 | static void Initialize(); |
| 48 | 48 | ||
| 49 | private: | 49 | private: |
| 50 | Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size, | 50 | Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size, |
| 51 | u64 device_address, u32 option, bool is_aligned); | 51 | u64 device_address, u32 option, bool is_aligned); |
| 52 | 52 | ||
| 53 | private: | 53 | private: |
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index c8122644f..d7adb3169 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h | |||
| @@ -394,6 +394,14 @@ private: | |||
| 394 | return region.GetEndAddress(); | 394 | return region.GetEndAddress(); |
| 395 | } | 395 | } |
| 396 | 396 | ||
| 397 | public: | ||
| 398 | static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) { | ||
| 399 | return Find(address, layout.GetVirtualMemoryRegionTree()); | ||
| 400 | } | ||
| 401 | static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) { | ||
| 402 | return Find(address, layout.GetPhysicalMemoryRegionTree()); | ||
| 403 | } | ||
| 404 | |||
| 397 | private: | 405 | private: |
| 398 | u64 m_linear_phys_to_virt_diff{}; | 406 | u64 m_linear_phys_to_virt_diff{}; |
| 399 | u64 m_linear_virt_to_phys_diff{}; | 407 | u64 m_linear_virt_to_phys_diff{}; |
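The added Find overloads choose a region tree from the address's static type: a KVirtualAddress searches the virtual tree, a KPhysicalAddress the physical one. A minimal model of that typed-address dispatch, with simplified wrapper types:

    #include <cstdint>
    #include <iostream>

    // Simplified stand-ins for the kernel's typed addresses.
    struct VirtualAddress { std::uint64_t value; };
    struct PhysicalAddress { std::uint64_t value; };

    // Overload resolution picks the right region tree from the address type,
    // so a physical address can never be looked up in the virtual tree by
    // accident.
    const char* Find(VirtualAddress) { return "virtual region tree"; }
    const char* Find(PhysicalAddress) { return "physical region tree"; }

    int main() {
        std::cout << Find(VirtualAddress{0x8000000}) << '\n';
        std::cout << Find(PhysicalAddress{0x80000000}) << '\n';
    }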
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index cdc5572d8..0a973ec8c 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp | |||
| @@ -456,8 +456,7 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size, | |||
| 456 | } | 456 | } |
| 457 | 457 | ||
| 458 | void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) { | 458 | void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) { |
| 459 | auto optimize_pa = | 459 | auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); |
| 460 | KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); | ||
| 461 | auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa); | 460 | auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa); |
| 462 | 461 | ||
| 463 | std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); | 462 | std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); |
| @@ -465,8 +464,7 @@ void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) { | |||
| 465 | 464 | ||
| 466 | void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, | 465 | void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, |
| 467 | size_t num_pages) { | 466 | size_t num_pages) { |
| 468 | auto optimize_pa = | 467 | auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); |
| 469 | KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); | ||
| 470 | auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa); | 468 | auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa); |
| 471 | 469 | ||
| 472 | // Get the range we're tracking. | 470 | // Get the range we're tracking. |
| @@ -485,8 +483,7 @@ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysi | |||
| 485 | 483 | ||
| 486 | void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, | 484 | void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, |
| 487 | size_t num_pages) { | 485 | size_t num_pages) { |
| 488 | auto optimize_pa = | 486 | auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); |
| 489 | KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); | ||
| 490 | auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa); | 487 | auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa); |
| 491 | 488 | ||
| 492 | // Get the range we're tracking. | 489 | // Get the range we're tracking. |
| @@ -506,8 +503,7 @@ void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysica | |||
| 506 | bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, | 503 | bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, |
| 507 | size_t num_pages, u8 fill_pattern) { | 504 | size_t num_pages, u8 fill_pattern) { |
| 508 | auto& device_memory = kernel.System().DeviceMemory(); | 505 | auto& device_memory = kernel.System().DeviceMemory(); |
| 509 | auto optimize_pa = | 506 | auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region); |
| 510 | KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region); | ||
| 511 | auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa); | 507 | auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa); |
| 512 | 508 | ||
| 513 | // We want to return whether any pages were newly allocated. | 509 | // We want to return whether any pages were newly allocated. |
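Each call site now obtains the management region's physical address through the kernel, and the pointer it gets back is a u64 bitmap used to track pages. A sketch of that bookkeeping under the simplifying assumptions of one bit per page and a map that starts at the heap base:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    constexpr std::size_t PageSize = 0x1000;

    // Mark pages [block, block + num_pages * PageSize) in a one-bit-per-page map.
    void TrackPages(std::vector<std::uint64_t>& map, std::uint64_t heap_base,
                    std::uint64_t block, std::size_t num_pages) {
        std::uint64_t page = (block - heap_base) / PageSize;
        for (std::size_t i = 0; i < num_pages; ++i, ++page) {
            map[page / 64] |= std::uint64_t{1} << (page % 64);
        }
    }

    int main() {
        std::vector<std::uint64_t> map(4); // covers 256 pages
        TrackPages(map, /*heap_base=*/0x80000000, /*block=*/0x80004000, /*num_pages=*/3);
        std::cout << std::hex << map[0] << '\n'; // bits 4..6 set -> 70
    }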
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp deleted file mode 100644 index 1d47bdf6b..000000000 --- a/src/core/hle/kernel/k_page_table.cpp +++ /dev/null | |||
| @@ -1,3519 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "common/alignment.h" | ||
| 5 | #include "common/assert.h" | ||
| 6 | #include "common/literals.h" | ||
| 7 | #include "common/scope_exit.h" | ||
| 8 | #include "common/settings.h" | ||
| 9 | #include "core/core.h" | ||
| 10 | #include "core/hle/kernel/k_address_space_info.h" | ||
| 11 | #include "core/hle/kernel/k_memory_block.h" | ||
| 12 | #include "core/hle/kernel/k_memory_block_manager.h" | ||
| 13 | #include "core/hle/kernel/k_page_group.h" | ||
| 14 | #include "core/hle/kernel/k_page_table.h" | ||
| 15 | #include "core/hle/kernel/k_process.h" | ||
| 16 | #include "core/hle/kernel/k_resource_limit.h" | ||
| 17 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | ||
| 18 | #include "core/hle/kernel/k_system_control.h" | ||
| 19 | #include "core/hle/kernel/k_system_resource.h" | ||
| 20 | #include "core/hle/kernel/kernel.h" | ||
| 21 | #include "core/hle/kernel/svc_results.h" | ||
| 22 | #include "core/memory.h" | ||
| 23 | |||
| 24 | namespace Kernel { | ||
| 25 | |||
| 26 | namespace { | ||
| 27 | |||
| 28 | class KScopedLightLockPair { | ||
| 29 | YUZU_NON_COPYABLE(KScopedLightLockPair); | ||
| 30 | YUZU_NON_MOVEABLE(KScopedLightLockPair); | ||
| 31 | |||
| 32 | private: | ||
| 33 | KLightLock* m_lower; | ||
| 34 | KLightLock* m_upper; | ||
| 35 | |||
| 36 | public: | ||
| 37 | KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { | ||
| 38 | // Ensure our locks are in a consistent order. | ||
| 39 | if (std::addressof(lhs) <= std::addressof(rhs)) { | ||
| 40 | m_lower = std::addressof(lhs); | ||
| 41 | m_upper = std::addressof(rhs); | ||
| 42 | } else { | ||
| 43 | m_lower = std::addressof(rhs); | ||
| 44 | m_upper = std::addressof(lhs); | ||
| 45 | } | ||
| 46 | |||
| 47 | // Acquire both locks. | ||
| 48 | m_lower->Lock(); | ||
| 49 | if (m_lower != m_upper) { | ||
| 50 | m_upper->Lock(); | ||
| 51 | } | ||
| 52 | } | ||
| 53 | |||
| 54 | ~KScopedLightLockPair() { | ||
| 55 | // Unlock the upper lock. | ||
| 56 | if (m_upper != nullptr && m_upper != m_lower) { | ||
| 57 | m_upper->Unlock(); | ||
| 58 | } | ||
| 59 | |||
| 60 | // Unlock the lower lock. | ||
| 61 | if (m_lower != nullptr) { | ||
| 62 | m_lower->Unlock(); | ||
| 63 | } | ||
| 64 | } | ||
| 65 | |||
| 66 | public: | ||
| 67 | // Utility. | ||
| 68 | void TryUnlockHalf(KLightLock& lock) { | ||
| 69 | // Only allow unlocking if the lock is half the pair. | ||
| 70 | if (m_lower != m_upper) { | ||
| 71 | // We want to be sure the lock is one we own. | ||
| 72 | if (m_lower == std::addressof(lock)) { | ||
| 73 | lock.Unlock(); | ||
| 74 | m_lower = nullptr; | ||
| 75 | } else if (m_upper == std::addressof(lock)) { | ||
| 76 | lock.Unlock(); | ||
| 77 | m_upper = nullptr; | ||
| 78 | } | ||
| 79 | } | ||
| 80 | } | ||
| 81 | }; | ||
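KScopedLightLockPair in the removed file orders its two acquisitions by address, so two threads locking the same pair with swapped arguments still acquire in the same order and cannot deadlock. The same idiom over standard mutexes, as a runnable sketch:

    #include <iostream>
    #include <mutex>

    // Acquire two mutexes in a consistent (address-based) order so that two
    // threads locking the same pair in opposite argument order cannot deadlock.
    class ScopedLockPair {
    public:
        ScopedLockPair(std::mutex& lhs, std::mutex& rhs)
            : m_lower{&lhs <= &rhs ? lhs : rhs}, m_upper{&lhs <= &rhs ? rhs : lhs} {
            m_lower.lock();
            if (&m_lower != &m_upper) { // skip the second lock when lhs == rhs
                m_upper.lock();
            }
        }
        ~ScopedLockPair() {
            if (&m_upper != &m_lower) {
                m_upper.unlock();
            }
            m_lower.unlock();
        }

    private:
        std::mutex& m_lower;
        std::mutex& m_upper;
    };

    int main() {
        std::mutex a, b;
        ScopedLockPair pair{b, a}; // locks in the same order as ScopedLockPair{a, b}
        std::cout << "both locks held\n";
    }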
| 82 | |||
| 83 | using namespace Common::Literals; | ||
| 84 | |||
| 85 | constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) { | ||
| 86 | switch (as_type) { | ||
| 87 | case Svc::CreateProcessFlag::AddressSpace32Bit: | ||
| 88 | case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias: | ||
| 89 | return 32; | ||
| 90 | case Svc::CreateProcessFlag::AddressSpace64BitDeprecated: | ||
| 91 | return 36; | ||
| 92 | case Svc::CreateProcessFlag::AddressSpace64Bit: | ||
| 93 | return 39; | ||
| 94 | default: | ||
| 95 | ASSERT(false); | ||
| 96 | return {}; | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | } // namespace | ||
| 101 | |||
| 102 | KPageTable::KPageTable(Core::System& system_) | ||
| 103 | : m_general_lock{system_.Kernel()}, | ||
| 104 | m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {} | ||
| 105 | |||
| 106 | KPageTable::~KPageTable() = default; | ||
| 107 | |||
| 108 | Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, | ||
| 109 | bool enable_das_merge, bool from_back, | ||
| 110 | KMemoryManager::Pool pool, KProcessAddress code_addr, | ||
| 111 | size_t code_size, KSystemResource* system_resource, | ||
| 112 | KResourceLimit* resource_limit, | ||
| 113 | Core::Memory::Memory& memory) { | ||
| 114 | |||
| 115 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { | ||
| 116 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); | ||
| 117 | }; | ||
| 118 | const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { | ||
| 119 | return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); | ||
| 120 | }; | ||
| 121 | |||
| 122 | // Set the tracking memory | ||
| 123 | m_memory = std::addressof(memory); | ||
| 124 | |||
| 125 | // Set our width and heap/alias sizes | ||
| 126 | m_address_space_width = GetAddressSpaceWidthFromType(as_type); | ||
| 127 | const KProcessAddress start = 0; | ||
| 128 | const KProcessAddress end{1ULL << m_address_space_width}; | ||
| 129 | size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; | ||
| 130 | size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; | ||
| 131 | |||
| 132 | ASSERT(code_addr < code_addr + code_size); | ||
| 133 | ASSERT(code_addr + code_size - 1 <= end - 1); | ||
| 134 | |||
| 135 | // Adjust heap/alias size if we don't have an alias region | ||
| 136 | if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) { | ||
| 137 | heap_region_size += alias_region_size; | ||
| 138 | alias_region_size = 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | // Set the code regions and determine the remaining region sizes | ||
| 142 | constexpr size_t RegionAlignment{2_MiB}; | ||
| 143 | KProcessAddress process_code_start{}; | ||
| 144 | KProcessAddress process_code_end{}; | ||
| 145 | size_t stack_region_size{}; | ||
| 146 | size_t kernel_map_region_size{}; | ||
| 147 | |||
| 148 | if (m_address_space_width == 39) { | ||
| 149 | alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); | ||
| 150 | heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); | ||
| 151 | stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); | ||
| 152 | kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | ||
| 153 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); | ||
| 154 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); | ||
| 155 | m_alias_code_region_start = m_code_region_start; | ||
| 156 | m_alias_code_region_end = m_code_region_end; | ||
| 157 | process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment); | ||
| 158 | process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment); | ||
| 159 | } else { | ||
| 160 | stack_region_size = 0; | ||
| 161 | kernel_map_region_size = 0; | ||
| 162 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); | ||
| 163 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | ||
| 164 | m_stack_region_start = m_code_region_start; | ||
| 165 | m_alias_code_region_start = m_code_region_start; | ||
| 166 | m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + | ||
| 167 | GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); | ||
| 168 | m_stack_region_end = m_code_region_end; | ||
| 169 | m_kernel_map_region_start = m_code_region_start; | ||
| 170 | m_kernel_map_region_end = m_code_region_end; | ||
| 171 | process_code_start = m_code_region_start; | ||
| 172 | process_code_end = m_code_region_end; | ||
| 173 | } | ||
| 174 | |||
| 175 | // Set other basic fields | ||
| 176 | m_enable_aslr = enable_aslr; | ||
| 177 | m_enable_device_address_space_merge = enable_das_merge; | ||
| 178 | m_address_space_start = start; | ||
| 179 | m_address_space_end = end; | ||
| 180 | m_is_kernel = false; | ||
| 181 | m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); | ||
| 182 | m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); | ||
| 183 | m_resource_limit = resource_limit; | ||
| 184 | |||
| 185 | // Determine the region in which we can place the regions whose positions are not yet determined | ||
| 186 | KProcessAddress alloc_start{}; | ||
| 187 | size_t alloc_size{}; | ||
| 188 | if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { | ||
| 189 | alloc_start = m_code_region_start; | ||
| 190 | alloc_size = process_code_start - m_code_region_start; | ||
| 191 | } else { | ||
| 192 | alloc_start = process_code_end; | ||
| 193 | alloc_size = end - process_code_end; | ||
| 194 | } | ||
| 195 | const size_t needed_size = | ||
| 196 | (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); | ||
| 197 | R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); | ||
| 198 | |||
| 199 | const size_t remaining_size{alloc_size - needed_size}; | ||
| 200 | |||
| 201 | // Determine random placements for each region | ||
| 202 | size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; | ||
| 203 | if (enable_aslr) { | ||
| 204 | alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 205 | RegionAlignment; | ||
| 206 | heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 207 | RegionAlignment; | ||
| 208 | stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 209 | RegionAlignment; | ||
| 210 | kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 211 | RegionAlignment; | ||
| 212 | } | ||
| 213 | |||
| 214 | // Set up the heap and alias regions | ||
| 215 | m_alias_region_start = alloc_start + alias_rnd; | ||
| 216 | m_alias_region_end = m_alias_region_start + alias_region_size; | ||
| 217 | m_heap_region_start = alloc_start + heap_rnd; | ||
| 218 | m_heap_region_end = m_heap_region_start + heap_region_size; | ||
| 219 | |||
| 220 | if (alias_rnd <= heap_rnd) { | ||
| 221 | m_heap_region_start += alias_region_size; | ||
| 222 | m_heap_region_end += alias_region_size; | ||
| 223 | } else { | ||
| 224 | m_alias_region_start += heap_region_size; | ||
| 225 | m_alias_region_end += heap_region_size; | ||
| 226 | } | ||
| 227 | |||
| 228 | // Set up the stack region | ||
| 229 | if (stack_region_size) { | ||
| 230 | m_stack_region_start = alloc_start + stack_rnd; | ||
| 231 | m_stack_region_end = m_stack_region_start + stack_region_size; | ||
| 232 | |||
| 233 | if (alias_rnd < stack_rnd) { | ||
| 234 | m_stack_region_start += alias_region_size; | ||
| 235 | m_stack_region_end += alias_region_size; | ||
| 236 | } else { | ||
| 237 | m_alias_region_start += stack_region_size; | ||
| 238 | m_alias_region_end += stack_region_size; | ||
| 239 | } | ||
| 240 | |||
| 241 | if (heap_rnd < stack_rnd) { | ||
| 242 | m_stack_region_start += heap_region_size; | ||
| 243 | m_stack_region_end += heap_region_size; | ||
| 244 | } else { | ||
| 245 | m_heap_region_start += stack_region_size; | ||
| 246 | m_heap_region_end += stack_region_size; | ||
| 247 | } | ||
| 248 | } | ||
| 249 | |||
| 250 | // Set up the kernel map region | ||
| 251 | if (kernel_map_region_size) { | ||
| 252 | m_kernel_map_region_start = alloc_start + kmap_rnd; | ||
| 253 | m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; | ||
| 254 | |||
| 255 | if (alias_rnd < kmap_rnd) { | ||
| 256 | m_kernel_map_region_start += alias_region_size; | ||
| 257 | m_kernel_map_region_end += alias_region_size; | ||
| 258 | } else { | ||
| 259 | m_alias_region_start += kernel_map_region_size; | ||
| 260 | m_alias_region_end += kernel_map_region_size; | ||
| 261 | } | ||
| 262 | |||
| 263 | if (heap_rnd < kmap_rnd) { | ||
| 264 | m_kernel_map_region_start += heap_region_size; | ||
| 265 | m_kernel_map_region_end += heap_region_size; | ||
| 266 | } else { | ||
| 267 | m_heap_region_start += kernel_map_region_size; | ||
| 268 | m_heap_region_end += kernel_map_region_size; | ||
| 269 | } | ||
| 270 | |||
| 271 | if (stack_region_size) { | ||
| 272 | if (stack_rnd < kmap_rnd) { | ||
| 273 | m_kernel_map_region_start += stack_region_size; | ||
| 274 | m_kernel_map_region_end += stack_region_size; | ||
| 275 | } else { | ||
| 276 | m_stack_region_start += kernel_map_region_size; | ||
| 277 | m_stack_region_end += kernel_map_region_size; | ||
| 278 | } | ||
| 279 | } | ||
| 280 | } | ||
| 281 | |||
| 282 | // Set heap and fill members. | ||
| 283 | m_current_heap_end = m_heap_region_start; | ||
| 284 | m_max_heap_size = 0; | ||
| 285 | m_mapped_physical_memory_size = 0; | ||
| 286 | m_mapped_unsafe_physical_memory = 0; | ||
| 287 | m_mapped_insecure_memory = 0; | ||
| 288 | m_mapped_ipc_server_memory = 0; | ||
| 289 | |||
| 290 | m_heap_fill_value = 0; | ||
| 291 | m_ipc_fill_value = 0; | ||
| 292 | m_stack_fill_value = 0; | ||
| 293 | |||
| 294 | // Set allocation option. | ||
| 295 | m_allocate_option = | ||
| 296 | KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack | ||
| 297 | : KMemoryManager::Direction::FromFront); | ||
| 298 | |||
| 299 | // Ensure that our regions are inside our address space | ||
| 300 | auto IsInAddressSpace = [&](KProcessAddress addr) { | ||
| 301 | return m_address_space_start <= addr && addr <= m_address_space_end; | ||
| 302 | }; | ||
| 303 | ASSERT(IsInAddressSpace(m_alias_region_start)); | ||
| 304 | ASSERT(IsInAddressSpace(m_alias_region_end)); | ||
| 305 | ASSERT(IsInAddressSpace(m_heap_region_start)); | ||
| 306 | ASSERT(IsInAddressSpace(m_heap_region_end)); | ||
| 307 | ASSERT(IsInAddressSpace(m_stack_region_start)); | ||
| 308 | ASSERT(IsInAddressSpace(m_stack_region_end)); | ||
| 309 | ASSERT(IsInAddressSpace(m_kernel_map_region_start)); | ||
| 310 | ASSERT(IsInAddressSpace(m_kernel_map_region_end)); | ||
| 311 | |||
| 312 | // Ensure that we selected regions that don't overlap | ||
| 313 | const KProcessAddress alias_start{m_alias_region_start}; | ||
| 314 | const KProcessAddress alias_last{m_alias_region_end - 1}; | ||
| 315 | const KProcessAddress heap_start{m_heap_region_start}; | ||
| 316 | const KProcessAddress heap_last{m_heap_region_end - 1}; | ||
| 317 | const KProcessAddress stack_start{m_stack_region_start}; | ||
| 318 | const KProcessAddress stack_last{m_stack_region_end - 1}; | ||
| 319 | const KProcessAddress kmap_start{m_kernel_map_region_start}; | ||
| 320 | const KProcessAddress kmap_last{m_kernel_map_region_end - 1}; | ||
| 321 | ASSERT(alias_last < heap_start || heap_last < alias_start); | ||
| 322 | ASSERT(alias_last < stack_start || stack_last < alias_start); | ||
| 323 | ASSERT(alias_last < kmap_start || kmap_last < alias_start); | ||
| 324 | ASSERT(heap_last < stack_start || stack_last < heap_start); | ||
| 325 | ASSERT(heap_last < kmap_start || kmap_last < heap_start); | ||
| 326 | |||
| 327 | m_current_heap_end = m_heap_region_start; | ||
| 328 | m_max_heap_size = 0; | ||
| 329 | m_mapped_physical_memory_size = 0; | ||
| 330 | m_memory_pool = pool; | ||
| 331 | |||
| 332 | m_page_table_impl = std::make_unique<Common::PageTable>(); | ||
| 333 | m_page_table_impl->Resize(m_address_space_width, PageBits); | ||
| 334 | |||
| 335 | // Initialize our memory block manager. | ||
| 336 | R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, | ||
| 337 | m_memory_block_slab_manager)); | ||
| 338 | } | ||
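The region setup above draws an independent random offset for each region, then shifts whichever region drew the later offset past each earlier one, which keeps the regions disjoint without re-rolling. A two-region model of that shift with a randomized self-check:

    #include <cassert>
    #include <cstdint>
    #include <random>

    int main() {
        std::mt19937_64 rng{1234};
        constexpr std::uint64_t alias_size = 0x1000, heap_size = 0x2000;
        constexpr std::uint64_t alloc_start = 0x100000, alloc_size = 0x10000;
        const std::uint64_t remaining = alloc_size - (alias_size + heap_size);
        std::uniform_int_distribution<std::uint64_t> dist{0, remaining};

        for (int trial = 0; trial < 100000; ++trial) {
            const std::uint64_t alias_rnd = dist(rng), heap_rnd = dist(rng);
            std::uint64_t alias_start = alloc_start + alias_rnd;
            std::uint64_t heap_start = alloc_start + heap_rnd;
            // Shift the region that drew the later offset past the earlier one.
            if (alias_rnd <= heap_rnd) {
                heap_start += alias_size;
            } else {
                alias_start += heap_size;
            }
            // The regions must neither overlap nor leave the allocatable span.
            assert(alias_start + alias_size <= heap_start ||
                   heap_start + heap_size <= alias_start);
            assert(heap_start + heap_size <= alloc_start + alloc_size);
            assert(alias_start + alias_size <= alloc_start + alloc_size);
        }
    }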
| 339 | |||
| 340 | void KPageTable::Finalize() { | ||
| 341 | auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { | ||
| 342 | if (Settings::IsFastmemEnabled()) { | ||
| 343 | m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); | ||
| 344 | } | ||
| 345 | }; | ||
| 346 | |||
| 347 | // Finalize memory blocks. | ||
| 348 | m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback)); | ||
| 349 | |||
| 350 | // Release any insecure mapped memory. | ||
| 351 | if (m_mapped_insecure_memory) { | ||
| 352 | UNIMPLEMENTED(); | ||
| 353 | } | ||
| 354 | |||
| 355 | // Release any ipc server memory. | ||
| 356 | if (m_mapped_ipc_server_memory) { | ||
| 357 | UNIMPLEMENTED(); | ||
| 358 | } | ||
| 359 | |||
| 360 | // Close the backing page table, as the destructor is not called for guest objects. | ||
| 361 | m_page_table_impl.reset(); | ||
| 362 | } | ||
| 363 | |||
| 364 | Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state, | ||
| 365 | KMemoryPermission perm) { | ||
| 366 | const u64 size{num_pages * PageSize}; | ||
| 367 | |||
| 368 | // Validate the mapping request. | ||
| 369 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | ||
| 370 | |||
| 371 | // Lock the table. | ||
| 372 | KScopedLightLock lk(m_general_lock); | ||
| 373 | |||
| 374 | // Verify that the destination memory is unmapped. | ||
| 375 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | ||
| 376 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 377 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 378 | |||
| 379 | // Create an update allocator. | ||
| 380 | Result allocator_result{ResultSuccess}; | ||
| 381 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 382 | m_memory_block_slab_manager); | ||
| 383 | |||
| 384 | // Allocate and open. | ||
| 385 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 386 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( | ||
| 387 | &pg, num_pages, | ||
| 388 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); | ||
| 389 | |||
| 390 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); | ||
| 391 | |||
| 392 | // Update the blocks. | ||
| 393 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 394 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 395 | KMemoryBlockDisableMergeAttribute::None); | ||
| 396 | |||
| 397 | R_SUCCEED(); | ||
| 398 | } | ||
| 399 | |||
| 400 | Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 401 | size_t size) { | ||
| 402 | // Validate the mapping request. | ||
| 403 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 404 | ResultInvalidMemoryRegion); | ||
| 405 | |||
| 406 | // Lock the table. | ||
| 407 | KScopedLightLock lk(m_general_lock); | ||
| 408 | |||
| 409 | // Verify that the source memory is normal heap. | ||
| 410 | KMemoryState src_state{}; | ||
| 411 | KMemoryPermission src_perm{}; | ||
| 412 | size_t num_src_allocator_blocks{}; | ||
| 413 | R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, | ||
| 414 | src_address, size, KMemoryState::All, KMemoryState::Normal, | ||
| 415 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 416 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 417 | |||
| 418 | // Verify that the destination memory is unmapped. | ||
| 419 | size_t num_dst_allocator_blocks{}; | ||
| 420 | R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, | ||
| 421 | KMemoryState::Free, KMemoryPermission::None, | ||
| 422 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 423 | KMemoryAttribute::None)); | ||
| 424 | |||
| 425 | // Create an update allocator for the source. | ||
| 426 | Result src_allocator_result{ResultSuccess}; | ||
| 427 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 428 | m_memory_block_slab_manager, | ||
| 429 | num_src_allocator_blocks); | ||
| 430 | R_TRY(src_allocator_result); | ||
| 431 | |||
| 432 | // Create an update allocator for the destination. | ||
| 433 | Result dst_allocator_result{ResultSuccess}; | ||
| 434 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 435 | m_memory_block_slab_manager, | ||
| 436 | num_dst_allocator_blocks); | ||
| 437 | R_TRY(dst_allocator_result); | ||
| 438 | |||
| 439 | // Map the code memory. | ||
| 440 | { | ||
| 441 | // Determine the number of pages being operated on. | ||
| 442 | const size_t num_pages = size / PageSize; | ||
| 443 | |||
| 444 | // Create page groups for the memory being mapped. | ||
| 445 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 446 | AddRegionToPages(src_address, num_pages, pg); | ||
| 447 | |||
| 448 | // We're going to perform an update, so create a helper. | ||
| 449 | KScopedPageTableUpdater updater(this); | ||
| 450 | |||
| 451 | // Reprotect the source as kernel-read/not mapped. | ||
| 452 | const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead | | ||
| 453 | KMemoryPermission::NotMapped); | ||
| 454 | R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 455 | |||
| 456 | // Ensure that we unprotect the source pages on failure. | ||
| 457 | auto unprot_guard = SCOPE_GUARD({ | ||
| 458 | ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions) | ||
| 459 | .IsSuccess()); | ||
| 460 | }); | ||
| 461 | |||
| 462 | // Map the alias pages. | ||
| 463 | const KPageProperties dst_properties = {new_perm, false, false, | ||
| 464 | DisableMergeAttribute::DisableHead}; | ||
| 465 | R_TRY( | ||
| 466 | this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false)); | ||
| 467 | |||
| 468 | // We successfully mapped the alias pages, so we don't need to unprotect the src pages on | ||
| 469 | // failure. | ||
| 470 | unprot_guard.Cancel(); | ||
| 471 | |||
| 472 | // Apply the memory block updates. | ||
| 473 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 474 | src_state, new_perm, KMemoryAttribute::Locked, | ||
| 475 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 476 | KMemoryBlockDisableMergeAttribute::None); | ||
| 477 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||
| 478 | KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, | ||
| 479 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 480 | KMemoryBlockDisableMergeAttribute::None); | ||
| 481 | } | ||
| 482 | |||
| 483 | R_SUCCEED(); | ||
| 484 | } | ||
| 485 | |||
| 486 | Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 487 | size_t size, | ||
| 488 | ICacheInvalidationStrategy icache_invalidation_strategy) { | ||
| 489 | // Validate the mapping request. | ||
| 490 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 491 | ResultInvalidMemoryRegion); | ||
| 492 | |||
| 493 | // Lock the table. | ||
| 494 | KScopedLightLock lk(m_general_lock); | ||
| 495 | |||
| 496 | // Verify that the source memory is locked normal heap. | ||
| 497 | size_t num_src_allocator_blocks{}; | ||
| 498 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 499 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, | ||
| 500 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 501 | KMemoryAttribute::Locked)); | ||
| 502 | |||
| 503 | // Verify that the destination memory is aliasable code. | ||
| 504 | size_t num_dst_allocator_blocks{}; | ||
| 505 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 506 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, | ||
| 507 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, | ||
| 508 | KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None)); | ||
| 509 | |||
| 510 | // Determine whether any pages being unmapped are code. | ||
| 511 | bool any_code_pages = false; | ||
| 512 | { | ||
| 513 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); | ||
| 514 | while (true) { | ||
| 515 | // Get the memory info. | ||
| 516 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 517 | |||
| 518 | // Check if the memory has the code flag. | ||
| 519 | if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) { | ||
| 520 | any_code_pages = true; | ||
| 521 | break; | ||
| 522 | } | ||
| 523 | |||
| 524 | // Check if we're done. | ||
| 525 | if (dst_address + size - 1 <= info.GetLastAddress()) { | ||
| 526 | break; | ||
| 527 | } | ||
| 528 | |||
| 529 | // Advance. | ||
| 530 | ++it; | ||
| 531 | } | ||
| 532 | } | ||
| 533 | |||
| 534 | // Ensure that we maintain the instruction cache. | ||
| 535 | bool reprotected_pages = false; | ||
| 536 | SCOPE_EXIT({ | ||
| 537 | if (reprotected_pages && any_code_pages) { | ||
| 538 | if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { | ||
| 539 | m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size); | ||
| 540 | } else { | ||
| 541 | m_system.InvalidateCpuInstructionCaches(); | ||
| 542 | } | ||
| 543 | } | ||
| 544 | }); | ||
| 545 | |||
| 546 | // Unmap. | ||
| 547 | { | ||
| 548 | // Determine the number of pages being operated on. | ||
| 549 | const size_t num_pages = size / PageSize; | ||
| 550 | |||
| 551 | // Create an update allocator for the source. | ||
| 552 | Result src_allocator_result{ResultSuccess}; | ||
| 553 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 554 | m_memory_block_slab_manager, | ||
| 555 | num_src_allocator_blocks); | ||
| 556 | R_TRY(src_allocator_result); | ||
| 557 | |||
| 558 | // Create an update allocator for the destination. | ||
| 559 | Result dst_allocator_result{ResultSuccess}; | ||
| 560 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 561 | m_memory_block_slab_manager, | ||
| 562 | num_dst_allocator_blocks); | ||
| 563 | R_TRY(dst_allocator_result); | ||
| 564 | |||
| 565 | // Unmap the aliased copy of the pages. | ||
| 566 | R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 567 | |||
| 568 | // Try to set the permissions for the source pages back to what they should be. | ||
| 569 | R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, | ||
| 570 | OperationType::ChangePermissions)); | ||
| 571 | |||
| 572 | // Apply the memory block updates. | ||
| 573 | m_memory_block_manager.Update( | ||
| 574 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 575 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 576 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 577 | m_memory_block_manager.Update( | ||
| 578 | std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, | ||
| 579 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 580 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 581 | |||
| 582 | // Note that we reprotected pages. | ||
| 583 | reprotected_pages = true; | ||
| 584 | } | ||
| 585 | |||
| 586 | R_SUCCEED(); | ||
| 587 | } | ||
| 588 | |||
| 589 | KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 590 | size_t num_pages, size_t alignment, size_t offset, | ||
| 591 | size_t guard_pages) { | ||
| 592 | KProcessAddress address = 0; | ||
| 593 | |||
| 594 | if (num_pages <= region_num_pages) { | ||
| 595 | if (this->IsAslrEnabled()) { | ||
| 596 | UNIMPLEMENTED(); | ||
| 597 | } | ||
| 598 | // Find the first free area. | ||
| 599 | if (address == 0) { | ||
| 600 | address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, | ||
| 601 | alignment, offset, guard_pages); | ||
| 602 | } | ||
| 603 | } | ||
| 604 | |||
| 605 | return address; | ||
| 606 | } | ||
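FindFreeArea delegates the search to the memory block manager, which performs a first-fit scan honoring alignment, offset, and guard pages. A simplified first-fit over an explicit free list, as a sketch (the kernel iterates memory blocks rather than a vector like this):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct FreeRange {
        std::uint64_t start;
        std::uint64_t size;
    };

    constexpr std::uint64_t PageSize = 0x1000;

    std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) / align * align;
    }

    // First-fit: find `size` bytes inside a free range, aligned, with guard
    // pages kept free on both sides of the returned area.
    std::uint64_t FindFreeArea(const std::vector<FreeRange>& free_ranges,
                               std::uint64_t size, std::uint64_t alignment,
                               std::uint64_t guard_pages) {
        const std::uint64_t guard = guard_pages * PageSize;
        for (const auto& range : free_ranges) {
            const std::uint64_t candidate = AlignUp(range.start + guard, alignment);
            if (candidate + size + guard <= range.start + range.size) {
                return candidate;
            }
        }
        return 0; // no space found
    }

    int main() {
        const std::vector<FreeRange> ranges{{0x10000, 0x3000}, {0x40000, 0x20000}};
        // The first range is too small once guards are applied, so the search
        // lands in the second one.
        std::cout << std::hex << FindFreeArea(ranges, 0x8000, 0x4000, 1) << '\n'; // 44000
    }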
| 607 | |||
| 608 | Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) { | ||
| 609 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 610 | |||
| 611 | const size_t size = num_pages * PageSize; | ||
| 612 | |||
| 613 | // We're making a new group, not adding to an existing one. | ||
| 614 | R_UNLESS(pg.empty(), ResultInvalidCurrentMemory); | ||
| 615 | |||
| 616 | // Begin traversal. | ||
| 617 | Common::PageTable::TraversalContext context; | ||
| 618 | Common::PageTable::TraversalEntry next_entry; | ||
| 619 | R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)), | ||
| 620 | ResultInvalidCurrentMemory); | ||
| 621 | |||
| 622 | // Prepare tracking variables. | ||
| 623 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 624 | size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1)); | ||
| 625 | size_t tot_size = cur_size; | ||
| 626 | |||
| 627 | // Iterate, adding to group as we go. | ||
| 628 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); | ||
| 629 | while (tot_size < size) { | ||
| 630 | R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context), | ||
| 631 | ResultInvalidCurrentMemory); | ||
| 632 | |||
| 633 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 634 | const size_t cur_pages = cur_size / PageSize; | ||
| 635 | |||
| 636 | R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); | ||
| 637 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 638 | |||
| 639 | cur_addr = next_entry.phys_addr; | ||
| 640 | cur_size = next_entry.block_size; | ||
| 641 | } else { | ||
| 642 | cur_size += next_entry.block_size; | ||
| 643 | } | ||
| 644 | |||
| 645 | tot_size += next_entry.block_size; | ||
| 646 | } | ||
| 647 | |||
| 648 | // Ensure we add the right amount for the last block. | ||
| 649 | if (tot_size > size) { | ||
| 650 | cur_size -= (tot_size - size); | ||
| 651 | } | ||
| 652 | |||
| 653 | // Add the last block. | ||
| 654 | const size_t cur_pages = cur_size / PageSize; | ||
| 655 | R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); | ||
| 656 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 657 | |||
| 658 | R_SUCCEED(); | ||
| 659 | } | ||
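MakePageGroup coalesces physically contiguous traversal entries into blocks before adding them to the group, and trims the final block when the traversal overshoots the requested size. A standalone sketch of the run-coalescing, taking per-page physical addresses as input:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    constexpr std::uint64_t PageSize = 0x1000;

    struct Block {
        std::uint64_t phys_addr;
        std::size_t num_pages;
    };

    // Merge physically contiguous pages into (address, page count) blocks,
    // mirroring how the traversal above grows cur_size until contiguity breaks.
    std::vector<Block> Coalesce(const std::vector<std::uint64_t>& page_addrs) {
        std::vector<Block> blocks;
        for (const std::uint64_t addr : page_addrs) {
            if (!blocks.empty() &&
                blocks.back().phys_addr + blocks.back().num_pages * PageSize == addr) {
                ++blocks.back().num_pages; // extends the current contiguous run
            } else {
                blocks.push_back({addr, 1}); // contiguity broke; start a new block
            }
        }
        return blocks;
    }

    int main() {
        const std::vector<std::uint64_t> pages{0x80000000, 0x80001000, 0x80005000};
        for (const auto& b : Coalesce(pages)) {
            std::cout << std::hex << b.phys_addr << " x" << b.num_pages << '\n';
        }
        // prints: 80000000 x2, then 80005000 x1
    }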
| 660 | |||
| 661 | bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) { | ||
| 662 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 663 | |||
| 664 | const size_t size = num_pages * PageSize; | ||
| 665 | const auto& memory_layout = m_system.Kernel().MemoryLayout(); | ||
| 666 | |||
| 667 | // Empty groups are necessarily invalid. | ||
| 668 | if (pg.empty()) { | ||
| 669 | return false; | ||
| 670 | } | ||
| 671 | |||
| 672 | // We're going to validate that the group we'd expect is the group we see. | ||
| 673 | auto cur_it = pg.begin(); | ||
| 674 | KPhysicalAddress cur_block_address = cur_it->GetAddress(); | ||
| 675 | size_t cur_block_pages = cur_it->GetNumPages(); | ||
| 676 | |||
| 677 | auto UpdateCurrentIterator = [&]() { | ||
| 678 | if (cur_block_pages == 0) { | ||
| 679 | if ((++cur_it) == pg.end()) { | ||
| 680 | return false; | ||
| 681 | } | ||
| 682 | |||
| 683 | cur_block_address = cur_it->GetAddress(); | ||
| 684 | cur_block_pages = cur_it->GetNumPages(); | ||
| 685 | } | ||
| 686 | return true; | ||
| 687 | }; | ||
| 688 | |||
| 689 | // Begin traversal. | ||
| 690 | Common::PageTable::TraversalContext context; | ||
| 691 | Common::PageTable::TraversalEntry next_entry; | ||
| 692 | if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) { | ||
| 693 | return false; | ||
| 694 | } | ||
| 695 | |||
| 696 | // Prepare tracking variables. | ||
| 697 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 698 | size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1)); | ||
| 699 | size_t tot_size = cur_size; | ||
| 700 | |||
| 701 | // Iterate, comparing expected to actual. | ||
| 702 | while (tot_size < size) { | ||
| 703 | if (!m_page_table_impl->ContinueTraversal(next_entry, context)) { | ||
| 704 | return false; | ||
| 705 | } | ||
| 706 | |||
| 707 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 708 | const size_t cur_pages = cur_size / PageSize; | ||
| 709 | |||
| 710 | if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) { | ||
| 711 | return false; | ||
| 712 | } | ||
| 713 | |||
| 714 | if (!UpdateCurrentIterator()) { | ||
| 715 | return false; | ||
| 716 | } | ||
| 717 | |||
| 718 | if (cur_block_address != cur_addr || cur_block_pages < cur_pages) { | ||
| 719 | return false; | ||
| 720 | } | ||
| 721 | |||
| 722 | cur_block_address += cur_size; | ||
| 723 | cur_block_pages -= cur_pages; | ||
| 724 | cur_addr = next_entry.phys_addr; | ||
| 725 | cur_size = next_entry.block_size; | ||
| 726 | } else { | ||
| 727 | cur_size += next_entry.block_size; | ||
| 728 | } | ||
| 729 | |||
| 730 | tot_size += next_entry.block_size; | ||
| 731 | } | ||
| 732 | |||
| 733 | // Ensure we compare the right amount for the last block. | ||
| 734 | if (tot_size > size) { | ||
| 735 | cur_size -= (tot_size - size); | ||
| 736 | } | ||
| 737 | |||
| 738 | if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) { | ||
| 739 | return false; | ||
| 740 | } | ||
| 741 | |||
| 742 | if (!UpdateCurrentIterator()) { | ||
| 743 | return false; | ||
| 744 | } | ||
| 745 | |||
| 746 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); | ||
| 747 | } | ||
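Editor's note: one detail worth pausing on is that both MakePageGroup and IsValidPageGroup size their first entry as next_entry.block_size - (cur_addr & (next_entry.block_size - 1)), i.e. the bytes remaining from cur_addr to the end of its naturally aligned block, because the traversal may begin in the middle of a large block. A worked example with arbitrary illustrative values:

#include <cstdint>

int main() {
    constexpr std::uint64_t block_size = 0x200000;  // a 2 MiB traversal block
    constexpr std::uint64_t cur_addr = 0x40123000;  // physical address inside it
    constexpr std::uint64_t remaining = block_size - (cur_addr & (block_size - 1));
    // 0x40123000 & 0x1FFFFF == 0x123000, so 0x200000 - 0x123000 == 0xDD000 bytes
    // (0xDD pages) remain until the end of the block.
    static_assert(remaining == 0xDD000);
    return 0;
}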
| 748 | |||
| 749 | Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size, | ||
| 750 | KPageTable& src_page_table, KProcessAddress src_addr) { | ||
| 751 | // Acquire the table locks. | ||
| 752 | KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock); | ||
| 753 | |||
| 754 | const size_t num_pages{size / PageSize}; | ||
| 755 | |||
| 756 | // Check that the memory is mapped in the destination process. | ||
| 757 | size_t num_allocator_blocks; | ||
| 758 | R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All, | ||
| 759 | KMemoryState::SharedCode, KMemoryPermission::UserReadWrite, | ||
| 760 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 761 | KMemoryAttribute::None)); | ||
| 762 | |||
| 763 | // Check that the memory is mapped in the source process. | ||
| 764 | R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess, | ||
| 765 | KMemoryState::FlagCanMapProcess, KMemoryPermission::None, | ||
| 766 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 767 | KMemoryAttribute::None)); | ||
| 768 | |||
| 769 | // Create an update allocator. | ||
| 770 | Result allocator_result{ResultSuccess}; | ||
| 771 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 772 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 773 | R_TRY(allocator_result); | ||
| 774 | |||
| 775 | R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 776 | |||
| 777 | // Apply the memory block update. | ||
| 778 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, | ||
| 779 | KMemoryState::Free, KMemoryPermission::None, | ||
| 780 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 781 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 782 | |||
| 783 | m_system.InvalidateCpuInstructionCaches(); | ||
| 784 | |||
| 785 | R_SUCCEED(); | ||
| 786 | } | ||
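Editor's note: UnmapProcessMemory (and SetupForIpc below) must hold two processes' table locks at once, which is why KScopedLightLockPair exists: both acquisitions have to happen in a globally consistent order, or two concurrent cross-process operations could deadlock against each other. A minimal sketch of the idea, assuming the conventional address-ordered acquisition and substituting std::mutex for KLightLock:

#include <mutex>

class ScopedLockPair {
public:
    // Acquire both locks in address order so that any two threads locking the
    // same pair always agree on the order; a lock paired with itself is taken once.
    ScopedLockPair(std::mutex& a, std::mutex& b)
        : m_first(&a < &b ? a : b), m_second(&a < &b ? b : a), m_same(&a == &b) {
        m_first.lock();
        if (!m_same) {
            m_second.lock();
        }
    }
    ~ScopedLockPair() {
        if (!m_same) {
            m_second.unlock();
        }
        m_first.unlock();
    }
    ScopedLockPair(const ScopedLockPair&) = delete;
    ScopedLockPair& operator=(const ScopedLockPair&) = delete;

private:
    std::mutex& m_first;
    std::mutex& m_second;
    bool m_same;
};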
| 787 | |||
| 788 | Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 789 | KProcessAddress address, size_t size, | ||
| 790 | KMemoryPermission test_perm, KMemoryState dst_state) { | ||
| 791 | // Validate pre-conditions. | ||
| 792 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 793 | ASSERT(test_perm == KMemoryPermission::UserReadWrite || | ||
| 794 | test_perm == KMemoryPermission::UserRead); | ||
| 795 | |||
| 796 | // Check that the address is in range. | ||
| 797 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 798 | |||
| 799 | // Get the source permission. | ||
| 800 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 801 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 802 | : KMemoryPermission::UserRead; | ||
| 803 | |||
| 804 | // Get aligned extents. | ||
| 805 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 806 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 807 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 808 | const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 809 | |||
| 810 | const auto aligned_src_last = aligned_src_end - 1; | ||
| 811 | const auto mapping_src_last = mapping_src_end - 1; | ||
| 812 | |||
| 813 | // Get the test state and attribute mask. | ||
| 814 | KMemoryState test_state; | ||
| 815 | KMemoryAttribute test_attr_mask; | ||
| 816 | switch (dst_state) { | ||
| 817 | case KMemoryState::Ipc: | ||
| 818 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 819 | test_attr_mask = | ||
| 820 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 821 | break; | ||
| 822 | case KMemoryState::NonSecureIpc: | ||
| 823 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 824 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 825 | break; | ||
| 826 | case KMemoryState::NonDeviceIpc: | ||
| 827 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 828 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 829 | break; | ||
| 830 | default: | ||
| 831 | R_THROW(ResultInvalidCombination); | ||
| 832 | } | ||
| 833 | |||
| 834 | // Ensure that on failure, we roll back appropriately. | ||
| 835 | size_t mapped_size = 0; | ||
| 836 | ON_RESULT_FAILURE { | ||
| 837 | if (mapped_size > 0) { | ||
| 838 | this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, | ||
| 839 | src_perm); | ||
| 840 | } | ||
| 841 | }; | ||
| 842 | |||
| 843 | size_t blocks_needed = 0; | ||
| 844 | |||
| 845 | // Iterate, mapping as needed. | ||
| 846 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); | ||
| 847 | while (true) { | ||
| 848 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 849 | |||
| 850 | // Validate the current block. | ||
| 851 | R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, | ||
| 852 | test_attr_mask, KMemoryAttribute::None)); | ||
| 853 | |||
| 854 | if (mapping_src_start < mapping_src_end && mapping_src_start < info.GetEndAddress() && | ||
| 855 | info.GetAddress() < GetInteger(mapping_src_end)) { | ||
| 856 | const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) | ||
| 857 | ? info.GetAddress() | ||
| 858 | : mapping_src_start; | ||
| 859 | const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() | ||
| 860 | : mapping_src_end; | ||
| 861 | const size_t cur_size = cur_end - cur_start; | ||
| 862 | |||
| 863 | if (info.GetAddress() < GetInteger(mapping_src_start)) { | ||
| 864 | ++blocks_needed; | ||
| 865 | } | ||
| 866 | if (mapping_src_last < info.GetLastAddress()) { | ||
| 867 | ++blocks_needed; | ||
| 868 | } | ||
| 869 | |||
| 870 | // Set the permissions on the block, if we need to. | ||
| 871 | if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { | ||
| 872 | R_TRY(Operate(cur_start, cur_size / PageSize, src_perm, | ||
| 873 | OperationType::ChangePermissions)); | ||
| 874 | } | ||
| 875 | |||
| 876 | // Note that we mapped this part. | ||
| 877 | mapped_size += cur_size; | ||
| 878 | } | ||
| 879 | |||
| 880 | // If the block is at the end, we're done. | ||
| 881 | if (aligned_src_last <= info.GetLastAddress()) { | ||
| 882 | break; | ||
| 883 | } | ||
| 884 | |||
| 885 | // Advance. | ||
| 886 | ++it; | ||
| 887 | ASSERT(it != m_memory_block_manager.end()); | ||
| 888 | } | ||
| 889 | |||
| 890 | if (out_blocks_needed != nullptr) { | ||
| 891 | ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 892 | *out_blocks_needed = blocks_needed; | ||
| 893 | } | ||
| 894 | |||
| 895 | R_SUCCEED(); | ||
| 896 | } | ||
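Editor's note: the heart of the IPC mapping logic is the pair of extent computations above. aligned_src_start/aligned_src_end round outward to cover every page the buffer touches, while mapping_src_start/mapping_src_end round inward to the pages the buffer fully covers; the difference between the two is the head and tail partial pages that need copy-on-map treatment. A worked example with arbitrary values:

#include <cstdint>

constexpr std::uint64_t kPage = 0x1000;
constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) { return v & ~(a - 1); }
constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) { return AlignDown(v + a - 1, a); }

int main() {
    constexpr std::uint64_t address = 0x10000800;  // buffer starts mid-page
    constexpr std::uint64_t size = 0x2100;         // and ends mid-page

    static_assert(AlignDown(address, kPage) == 0x10000000);         // aligned_src_start
    static_assert(AlignUp(address + size, kPage) == 0x10003000);    // aligned_src_end
    static_assert(AlignUp(address, kPage) == 0x10001000);           // mapping_src_start
    static_assert(AlignDown(address + size, kPage) == 0x10002000);  // mapping_src_end
    // Only [0x10001000, 0x10002000) can be mapped directly; the head page
    // [0x10000000, 0x10001000) and tail page [0x10002000, 0x10003000) are the
    // partial pages that get copied into freshly allocated memory.
    return 0;
}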
| 897 | |||
| 898 | Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size, | ||
| 899 | KProcessAddress src_addr, KMemoryPermission test_perm, | ||
| 900 | KMemoryState dst_state, KPageTable& src_page_table, | ||
| 901 | bool send) { | ||
| 902 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 903 | ASSERT(src_page_table.IsLockedByCurrentThread()); | ||
| 904 | |||
| 905 | // Check that we can theoretically map. | ||
| 906 | const KProcessAddress region_start = m_alias_region_start; | ||
| 907 | const size_t region_size = m_alias_region_end - m_alias_region_start; | ||
| 908 | R_UNLESS(size < region_size, ResultOutOfAddressSpace); | ||
| 909 | |||
| 910 | // Get aligned source extents. | ||
| 911 | const KProcessAddress src_start = src_addr; | ||
| 912 | const KProcessAddress src_end = src_addr + size; | ||
| 913 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize); | ||
| 914 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize); | ||
| 915 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize); | ||
| 916 | const KProcessAddress mapping_src_end = | ||
| 917 | Common::AlignDown(GetInteger(src_start) + size, PageSize); | ||
| 918 | const size_t aligned_src_size = aligned_src_end - aligned_src_start; | ||
| 919 | const size_t mapping_src_size = | ||
| 920 | (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; | ||
| 921 | |||
| 922 | // Select an address to map at (randomized only when ASLR is enabled). | ||
| 923 | KProcessAddress dst_addr = | ||
| 924 | this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, | ||
| 925 | PageSize, 0, this->GetNumGuardPages()); | ||
| 926 | |||
| 927 | R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); | ||
| 928 | |||
| 929 | // Check that we can perform the operation we're about to perform. | ||
| 930 | ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); | ||
| 931 | |||
| 932 | // Create an update allocator. | ||
| 933 | Result allocator_result; | ||
| 934 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 935 | m_memory_block_slab_manager); | ||
| 936 | R_TRY(allocator_result); | ||
| 937 | |||
| 938 | // We're going to perform an update, so create a helper. | ||
| 939 | KScopedPageTableUpdater updater(this); | ||
| 940 | |||
| 941 | // Reserve space for any partial pages we allocate. | ||
| 942 | const size_t unmapped_size = aligned_src_size - mapping_src_size; | ||
| 943 | KScopedResourceReservation memory_reservation( | ||
| 944 | m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size); | ||
| 945 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 946 | |||
| 947 | // Ensure that we manage page references correctly. | ||
| 948 | KPhysicalAddress start_partial_page = 0; | ||
| 949 | KPhysicalAddress end_partial_page = 0; | ||
| 950 | KProcessAddress cur_mapped_addr = dst_addr; | ||
| 951 | |||
| 952 | // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll | ||
| 953 | // be freed on scope exit. | ||
| 954 | SCOPE_EXIT({ | ||
| 955 | if (start_partial_page != 0) { | ||
| 956 | m_system.Kernel().MemoryManager().Close(start_partial_page, 1); | ||
| 957 | } | ||
| 958 | if (end_partial_page != 0) { | ||
| 959 | m_system.Kernel().MemoryManager().Close(end_partial_page, 1); | ||
| 960 | } | ||
| 961 | }); | ||
| 962 | |||
| 963 | ON_RESULT_FAILURE { | ||
| 964 | if (cur_mapped_addr != dst_addr) { | ||
| 965 | ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize, | ||
| 966 | KMemoryPermission::None, OperationType::Unmap) | ||
| 967 | .IsSuccess()); | ||
| 968 | } | ||
| 969 | }; | ||
| 970 | |||
| 971 | // Allocate the start page as needed. | ||
| 972 | if (aligned_src_start < mapping_src_start) { | ||
| 973 | start_partial_page = | ||
| 974 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 975 | R_UNLESS(start_partial_page != 0, ResultOutOfMemory); | ||
| 976 | } | ||
| 977 | |||
| 978 | // Allocate the end page as needed. | ||
| 979 | if (mapping_src_end < aligned_src_end && | ||
| 980 | (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { | ||
| 981 | end_partial_page = | ||
| 982 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 983 | R_UNLESS(end_partial_page != 0, ResultOutOfMemory); | ||
| 984 | } | ||
| 985 | |||
| 986 | // Get the implementation. | ||
| 987 | auto& src_impl = src_page_table.PageTableImpl(); | ||
| 988 | |||
| 989 | // Get the fill value for partial pages. | ||
| 990 | const auto fill_val = m_ipc_fill_value; | ||
| 991 | |||
| 992 | // Begin traversal. | ||
| 993 | Common::PageTable::TraversalContext context; | ||
| 994 | Common::PageTable::TraversalEntry next_entry; | ||
| 995 | bool traverse_valid = | ||
| 996 | src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start)); | ||
| 997 | ASSERT(traverse_valid); | ||
| 998 | |||
| 999 | // Prepare tracking variables. | ||
| 1000 | KPhysicalAddress cur_block_addr = next_entry.phys_addr; | ||
| 1001 | size_t cur_block_size = | ||
| 1002 | next_entry.block_size - (cur_block_addr & (next_entry.block_size - 1)); | ||
| 1003 | size_t tot_block_size = cur_block_size; | ||
| 1004 | |||
| 1005 | // Map the start page, if we have one. | ||
| 1006 | if (start_partial_page != 0) { | ||
| 1007 | // Ensure the page holds correct data. | ||
| 1008 | const KVirtualAddress start_partial_virt = | ||
| 1009 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page); | ||
| 1010 | if (send) { | ||
| 1011 | const size_t partial_offset = src_start - aligned_src_start; | ||
| 1012 | size_t copy_size, clear_size; | ||
| 1013 | if (src_end < mapping_src_start) { | ||
| 1014 | copy_size = size; | ||
| 1015 | clear_size = mapping_src_start - src_end; | ||
| 1016 | } else { | ||
| 1017 | copy_size = mapping_src_start - src_start; | ||
| 1018 | clear_size = 0; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val, | ||
| 1022 | partial_offset); | ||
| 1023 | std::memcpy( | ||
| 1024 | m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset), | ||
| 1025 | m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress( | ||
| 1026 | m_system.Kernel().MemoryLayout(), cur_block_addr)) + | ||
| 1027 | partial_offset), | ||
| 1028 | copy_size); | ||
| 1029 | if (clear_size > 0) { | ||
| 1030 | std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) + | ||
| 1031 | partial_offset + copy_size), | ||
| 1032 | fill_val, clear_size); | ||
| 1033 | } | ||
| 1034 | } else { | ||
| 1035 | std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val, | ||
| 1036 | PageSize); | ||
| 1037 | } | ||
| 1038 | |||
| 1039 | // Map the page. | ||
| 1040 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page)); | ||
| 1041 | |||
| 1042 | // Update tracking extents. | ||
| 1043 | cur_mapped_addr += PageSize; | ||
| 1044 | cur_block_addr += PageSize; | ||
| 1045 | cur_block_size -= PageSize; | ||
| 1046 | |||
| 1047 | // If the block's size was one page, we may need to continue traversal. | ||
| 1048 | if (cur_block_size == 0 && aligned_src_size > PageSize) { | ||
| 1049 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1050 | ASSERT(traverse_valid); | ||
| 1051 | |||
| 1052 | cur_block_addr = next_entry.phys_addr; | ||
| 1053 | cur_block_size = next_entry.block_size; | ||
| 1054 | tot_block_size += next_entry.block_size; | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | // Map the remaining pages. | ||
| 1059 | while (aligned_src_start + tot_block_size < mapping_src_end) { | ||
| 1060 | // Continue the traversal. | ||
| 1061 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1062 | ASSERT(traverse_valid); | ||
| 1063 | |||
| 1064 | // Process the block. | ||
| 1065 | if (next_entry.phys_addr != cur_block_addr + cur_block_size) { | ||
| 1066 | // Map the block we've been processing so far. | ||
| 1067 | R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1068 | cur_block_addr)); | ||
| 1069 | |||
| 1070 | // Update tracking extents. | ||
| 1071 | cur_mapped_addr += cur_block_size; | ||
| 1072 | cur_block_addr = next_entry.phys_addr; | ||
| 1073 | cur_block_size = next_entry.block_size; | ||
| 1074 | } else { | ||
| 1075 | cur_block_size += next_entry.block_size; | ||
| 1076 | } | ||
| 1077 | tot_block_size += next_entry.block_size; | ||
| 1078 | } | ||
| 1079 | |||
| 1080 | // Handle the last direct-mapped page. | ||
| 1081 | if (const KProcessAddress mapped_block_end = | ||
| 1082 | aligned_src_start + tot_block_size - cur_block_size; | ||
| 1083 | mapped_block_end < mapping_src_end) { | ||
| 1084 | const size_t last_block_size = mapping_src_end - mapped_block_end; | ||
| 1085 | |||
| 1086 | // Map the last block. | ||
| 1087 | R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1088 | cur_block_addr)); | ||
| 1089 | |||
| 1090 | // Update tracking extents. | ||
| 1091 | cur_mapped_addr += last_block_size; | ||
| 1092 | cur_block_addr += last_block_size; | ||
| 1093 | if (mapped_block_end + cur_block_size < aligned_src_end && | ||
| 1094 | cur_block_size == last_block_size) { | ||
| 1095 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1096 | ASSERT(traverse_valid); | ||
| 1097 | |||
| 1098 | cur_block_addr = next_entry.phys_addr; | ||
| 1099 | } | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | // Map the end page, if we have one. | ||
| 1103 | if (end_partial_page != 0) { | ||
| 1104 | // Ensure the page holds correct data. | ||
| 1105 | const KVirtualAddress end_partial_virt = | ||
| 1106 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page); | ||
| 1107 | if (send) { | ||
| 1108 | const size_t copy_size = src_end - mapping_src_end; | ||
| 1109 | std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), | ||
| 1110 | m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress( | ||
| 1111 | m_system.Kernel().MemoryLayout(), cur_block_addr))), | ||
| 1112 | copy_size); | ||
| 1113 | std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size), | ||
| 1114 | fill_val, PageSize - copy_size); | ||
| 1115 | } else { | ||
| 1116 | std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val, | ||
| 1117 | PageSize); | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | // Map the page. | ||
| 1121 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page)); | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | // Update memory blocks to reflect our changes. | ||
| 1125 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, | ||
| 1126 | dst_state, test_perm, KMemoryAttribute::None, | ||
| 1127 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1128 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1129 | |||
| 1130 | // Set the output address. | ||
| 1131 | *out_addr = dst_addr + (src_start - aligned_src_start); | ||
| 1132 | |||
| 1133 | // We succeeded. | ||
| 1134 | memory_reservation.Commit(); | ||
| 1135 | R_SUCCEED(); | ||
| 1136 | } | ||
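Editor's note: for a send, the start partial page is assembled in three strokes: fill-value bytes up to the buffer's first byte, a copy of the client's data, and, when the buffer also ends inside that same page, fill-value bytes after it; receives skip the copy and clear the whole page. A minimal standalone sketch of the send-side assembly, with raw byte buffers standing in for the mapped heap pages:

#include <cstddef>
#include <cstdint>
#include <cstring>

// dst_page and src_page each point at one page worth of bytes. partial_offset
// is the buffer's offset into the page, copy_size the client bytes to carry
// over, and clear_size the filler needed after them (zero when the buffer runs
// to the end of the page or beyond).
void FillStartPartialPage(std::uint8_t* dst_page, const std::uint8_t* src_page,
                          std::size_t partial_offset, std::size_t copy_size,
                          std::size_t clear_size, std::uint8_t fill_val) {
    std::memset(dst_page, fill_val, partial_offset);                               // head filler
    std::memcpy(dst_page + partial_offset, src_page + partial_offset, copy_size);  // payload
    if (clear_size > 0) {
        std::memset(dst_page + partial_offset + copy_size, fill_val, clear_size);  // tail filler
    }
}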
| 1137 | |||
| 1138 | Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 1139 | KPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 1140 | KMemoryState dst_state, bool send) { | ||
| 1141 | // For convenience, alias this. | ||
| 1142 | KPageTable& dst_page_table = *this; | ||
| 1143 | |||
| 1144 | // Acquire the table locks. | ||
| 1145 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 1146 | |||
| 1147 | // We're going to perform an update, so create a helper. | ||
| 1148 | KScopedPageTableUpdater updater(std::addressof(src_page_table)); | ||
| 1149 | |||
| 1150 | // Perform client setup. | ||
| 1151 | size_t num_allocator_blocks; | ||
| 1152 | R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), | ||
| 1153 | std::addressof(num_allocator_blocks), src_addr, size, | ||
| 1154 | test_perm, dst_state)); | ||
| 1155 | |||
| 1156 | // Create an update allocator. | ||
| 1157 | Result allocator_result; | ||
| 1158 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1159 | src_page_table.m_memory_block_slab_manager, | ||
| 1160 | num_allocator_blocks); | ||
| 1161 | R_TRY(allocator_result); | ||
| 1162 | |||
| 1163 | // Get the mapped extents. | ||
| 1164 | const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize); | ||
| 1165 | const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize); | ||
| 1166 | const size_t src_map_size = src_map_end - src_map_start; | ||
| 1167 | |||
| 1168 | // Ensure that we clean up appropriately if we fail after this. | ||
| 1169 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 1170 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 1171 | : KMemoryPermission::UserRead; | ||
| 1172 | ON_RESULT_FAILURE { | ||
| 1173 | if (src_map_end > src_map_start) { | ||
| 1174 | src_page_table.CleanupForIpcClientOnServerSetupFailure( | ||
| 1175 | updater.GetPageList(), src_map_start, src_map_size, src_perm); | ||
| 1176 | } | ||
| 1177 | }; | ||
| 1178 | |||
| 1179 | // Perform server setup. | ||
| 1180 | R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, | ||
| 1181 | src_page_table, send)); | ||
| 1182 | |||
| 1183 | // If anything was mapped, ipc-lock the pages. | ||
| 1184 | if (src_map_start < src_map_end) { | ||
| 1185 | // Get the source permission. | ||
| 1186 | src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, | ||
| 1187 | (src_map_end - src_map_start) / PageSize, | ||
| 1188 | &KMemoryBlock::LockForIpc, src_perm); | ||
| 1189 | } | ||
| 1190 | |||
| 1191 | R_SUCCEED(); | ||
| 1192 | } | ||
| 1193 | |||
| 1194 | Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size, | ||
| 1195 | KMemoryState dst_state) { | ||
| 1196 | // Validate the address. | ||
| 1197 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1198 | |||
| 1199 | // Lock the table. | ||
| 1200 | KScopedLightLock lk(m_general_lock); | ||
| 1201 | |||
| 1202 | // Validate the memory state. | ||
| 1203 | size_t num_allocator_blocks; | ||
| 1204 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1205 | KMemoryState::All, dst_state, KMemoryPermission::UserRead, | ||
| 1206 | KMemoryPermission::UserRead, KMemoryAttribute::All, | ||
| 1207 | KMemoryAttribute::None)); | ||
| 1208 | |||
| 1209 | // Create an update allocator. | ||
| 1210 | Result allocator_result; | ||
| 1211 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1212 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1213 | R_TRY(allocator_result); | ||
| 1214 | |||
| 1215 | // We're going to perform an update, so create a helper. | ||
| 1216 | KScopedPageTableUpdater updater(this); | ||
| 1217 | |||
| 1218 | // Get aligned extents. | ||
| 1219 | const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 1220 | const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 1221 | const size_t aligned_size = aligned_end - aligned_start; | ||
| 1222 | const size_t aligned_num_pages = aligned_size / PageSize; | ||
| 1223 | |||
| 1224 | // Unmap the pages. | ||
| 1225 | R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 1226 | |||
| 1227 | // Update memory blocks. | ||
| 1228 | m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, | ||
| 1229 | KMemoryState::None, KMemoryPermission::None, | ||
| 1230 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1231 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1232 | |||
| 1233 | // Release from the resource limit as relevant. | ||
| 1234 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 1235 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 1236 | const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; | ||
| 1237 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size); | ||
| 1238 | |||
| 1239 | R_SUCCEED(); | ||
| 1240 | } | ||
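Editor's note: the release amount on the last line mirrors the reservation made in SetupForIpcServer: aligned_size - mapping_size is exactly the partial-page memory (unmapped_size there) that was charged against the resource limit when the server mapping was created. Reusing the numbers from the earlier worked example:

#include <cstdint>

int main() {
    constexpr std::uint64_t aligned_size = 0x10003000 - 0x10000000;  // 0x3000: three pages touched
    constexpr std::uint64_t mapping_size = 0x10002000 - 0x10001000;  // 0x1000: one fully covered page
    static_assert(aligned_size - mapping_size == 0x2000);            // two partial pages released
    return 0;
}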
| 1241 | |||
| 1242 | Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size, | ||
| 1243 | KMemoryState dst_state) { | ||
| 1244 | // Validate the address. | ||
| 1245 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1246 | |||
| 1247 | // Get aligned source extents. | ||
| 1248 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 1249 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 1250 | const KProcessAddress mapping_last = mapping_end - 1; | ||
| 1251 | const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; | ||
| 1252 | |||
| 1253 | // If nothing was mapped, we're actually done immediately. | ||
| 1254 | R_SUCCEED_IF(mapping_size == 0); | ||
| 1255 | |||
| 1256 | // Get the test state and attribute mask. | ||
| 1257 | KMemoryState test_state; | ||
| 1258 | KMemoryAttribute test_attr_mask; | ||
| 1259 | switch (dst_state) { | ||
| 1260 | case KMemoryState::Ipc: | ||
| 1261 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 1262 | test_attr_mask = | ||
| 1263 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 1264 | break; | ||
| 1265 | case KMemoryState::NonSecureIpc: | ||
| 1266 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 1267 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1268 | break; | ||
| 1269 | case KMemoryState::NonDeviceIpc: | ||
| 1270 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 1271 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1272 | break; | ||
| 1273 | default: | ||
| 1274 | R_THROW(ResultInvalidCombination); | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | // Lock the table. | ||
| 1278 | // NOTE: Nintendo does this *after* creating the updater below, but this does not follow | ||
| 1279 | // convention elsewhere in KPageTable. | ||
| 1280 | KScopedLightLock lk(m_general_lock); | ||
| 1281 | |||
| 1282 | // We're going to perform an update, so create a helper. | ||
| 1283 | KScopedPageTableUpdater updater(this); | ||
| 1284 | |||
| 1285 | // Ensure that on failure, we roll back appropriately. | ||
| 1286 | size_t mapped_size = 0; | ||
| 1287 | ON_RESULT_FAILURE { | ||
| 1288 | if (mapped_size > 0) { | ||
| 1289 | // Determine where the mapping ends. | ||
| 1290 | const auto mapped_end = mapping_start + mapped_size; | ||
| 1291 | const auto mapped_last = mapped_end - 1; | ||
| 1292 | |||
| 1293 | // Get current and next iterators. | ||
| 1294 | KMemoryBlockManager::const_iterator start_it = | ||
| 1295 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1296 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1297 | ++next_it; | ||
| 1298 | |||
| 1299 | // Get the current block info. | ||
| 1300 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1301 | |||
| 1302 | // Create tracking variables. | ||
| 1303 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 1304 | size_t cur_size = cur_info.GetSize(); | ||
| 1305 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1306 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1307 | bool first = | ||
| 1308 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1309 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1310 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1311 | |||
| 1312 | while ((cur_address + cur_size - 1) < mapped_last) { | ||
| 1313 | // Check that we have a next block. | ||
| 1314 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1315 | |||
| 1316 | // Get the next info. | ||
| 1317 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1318 | |||
| 1319 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1321 | const bool next_perm_eq = | ||
| 1322 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1323 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1324 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1325 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1326 | // We can consolidate the reprotection for the current and next block into a | ||
| 1327 | // single call. | ||
| 1328 | cur_size += next_info.GetSize(); | ||
| 1329 | } else { | ||
| 1330 | // We have to operate on the current block. | ||
| 1331 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1332 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1333 | OperationType::ChangePermissions) | ||
| 1334 | .IsSuccess()); | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | // Advance. | ||
| 1338 | cur_address = next_info.GetAddress(); | ||
| 1339 | cur_size = next_info.GetSize(); | ||
| 1340 | first = false; | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | // Advance. | ||
| 1344 | cur_info = next_info; | ||
| 1345 | cur_perm_eq = next_perm_eq; | ||
| 1346 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1347 | ++next_it; | ||
| 1348 | } | ||
| 1349 | |||
| 1350 | // Process the last block. | ||
| 1351 | if ((first || cur_needs_set_perm) && !cur_perm_eq) { | ||
| 1352 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1353 | OperationType::ChangePermissions) | ||
| 1354 | .IsSuccess()); | ||
| 1355 | } | ||
| 1356 | } | ||
| 1357 | }; | ||
| 1358 | |||
| 1359 | // Iterate, reprotecting as needed. | ||
| 1360 | { | ||
| 1361 | // Get current and next iterators. | ||
| 1362 | KMemoryBlockManager::const_iterator start_it = | ||
| 1363 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1364 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1365 | ++next_it; | ||
| 1366 | |||
| 1367 | // Validate the current block. | ||
| 1368 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1369 | ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None, | ||
| 1370 | KMemoryPermission::None, | ||
| 1371 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1372 | KMemoryAttribute::IpcLocked) | ||
| 1373 | .IsSuccess()); | ||
| 1374 | |||
| 1375 | // Create tracking variables. | ||
| 1376 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 1377 | size_t cur_size = cur_info.GetSize(); | ||
| 1378 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1379 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1380 | bool first = | ||
| 1381 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1382 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1383 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1384 | |||
| 1385 | while ((cur_address + cur_size - 1) < mapping_last) { | ||
| 1386 | // Check that we have a next block. | ||
| 1387 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1388 | |||
| 1389 | // Get the next info. | ||
| 1390 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1391 | |||
| 1392 | // Validate the next block. | ||
| 1393 | ASSERT(this->CheckMemoryState(next_info, test_state, test_state, | ||
| 1394 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1395 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1396 | KMemoryAttribute::IpcLocked) | ||
| 1397 | .IsSuccess()); | ||
| 1398 | |||
| 1399 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1400 | const bool next_perm_eq = | ||
| 1401 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1402 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1403 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1404 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1405 | // We can consolidate the reprotection for the current and next block into a single | ||
| 1406 | // call. | ||
| 1407 | cur_size += next_info.GetSize(); | ||
| 1408 | } else { | ||
| 1409 | // We have to operate on the current block. | ||
| 1410 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1411 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1412 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1413 | : cur_info.GetPermission(), | ||
| 1414 | OperationType::ChangePermissions)); | ||
| 1415 | } | ||
| 1416 | |||
| 1417 | // Mark that we mapped the block. | ||
| 1418 | mapped_size += cur_size; | ||
| 1419 | |||
| 1420 | // Advance. | ||
| 1421 | cur_address = next_info.GetAddress(); | ||
| 1422 | cur_size = next_info.GetSize(); | ||
| 1423 | first = false; | ||
| 1424 | } | ||
| 1425 | |||
| 1426 | // Advance. | ||
| 1427 | cur_info = next_info; | ||
| 1428 | cur_perm_eq = next_perm_eq; | ||
| 1429 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1430 | ++next_it; | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | // Process the last block. | ||
| 1434 | const auto lock_count = | ||
| 1435 | cur_info.GetIpcLockCount() + | ||
| 1436 | (next_it != m_memory_block_manager.end() | ||
| 1437 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 1438 | : 0); | ||
| 1439 | if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { | ||
| 1440 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1441 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1442 | : cur_info.GetPermission(), | ||
| 1443 | OperationType::ChangePermissions)); | ||
| 1444 | } | ||
| 1445 | } | ||
| 1446 | |||
| 1447 | // Create an update allocator. | ||
| 1448 | // NOTE: Guaranteed zero blocks needed here. | ||
| 1449 | Result allocator_result; | ||
| 1450 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1451 | m_memory_block_slab_manager, 0); | ||
| 1452 | R_TRY(allocator_result); | ||
| 1453 | |||
| 1454 | // Unlock the pages. | ||
| 1455 | m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, | ||
| 1456 | mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, | ||
| 1457 | KMemoryPermission::None); | ||
| 1458 | |||
| 1459 | R_SUCCEED(); | ||
| 1460 | } | ||
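Editor's note: both the rollback handler and the main loop above rely on the same consolidation trick: keep widening the current range as long as the next block would make the identical reprotect-or-not decision, so a single ChangePermissions call covers the whole run. A simplified standalone sketch (the first/lock_count edge cases are deliberately omitted, and the types are illustrative):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

struct BlockInfo {
    std::uint64_t address;
    std::size_t size;
    int perm;           // current permission bits
    int original_perm;  // permission to restore once the last IPC lock drops
    int ipc_lock_count;
};

// operate(start, size, perm) stands in for Operate(..., ChangePermissions).
void ReprotectConsolidated(const std::vector<BlockInfo>& blocks,
                           const std::function<void(std::uint64_t, std::size_t, int)>& operate) {
    std::size_t i = 0;
    while (i < blocks.size()) {
        const bool needs_set_perm =
            blocks[i].perm != blocks[i].original_perm && blocks[i].ipc_lock_count == 1;
        const std::uint64_t start = blocks[i].address;
        std::size_t total = blocks[i].size;
        std::size_t j = i + 1;
        // Widen the run while the next block would make the same decision.
        while (j < blocks.size() &&
               ((blocks[j].perm != blocks[j].original_perm && blocks[j].ipc_lock_count == 1) ==
                needs_set_perm) &&
               blocks[j].original_perm == blocks[i].original_perm) {
            total += blocks[j].size;
            ++j;
        }
        if (needs_set_perm) {
            operate(start, total, blocks[i].original_perm);  // one call for the whole run
        }
        i = j;
    }
}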
| 1461 | |||
| 1462 | void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list, | ||
| 1463 | KProcessAddress address, size_t size, | ||
| 1464 | KMemoryPermission prot_perm) { | ||
| 1465 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1466 | ASSERT(Common::IsAligned(GetInteger(address), PageSize)); | ||
| 1467 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 1468 | |||
| 1469 | // Get the mapped extents. | ||
| 1470 | const KProcessAddress src_map_start = address; | ||
| 1471 | const KProcessAddress src_map_end = address + size; | ||
| 1472 | const KProcessAddress src_map_last = src_map_end - 1; | ||
| 1473 | |||
| 1474 | // This function is only invoked when there's something to do. | ||
| 1475 | ASSERT(src_map_end > src_map_start); | ||
| 1476 | |||
| 1477 | // Iterate over blocks, fixing permissions. | ||
| 1478 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); | ||
| 1479 | while (true) { | ||
| 1480 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1481 | |||
| 1482 | const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) | ||
| 1483 | ? info.GetAddress() | ||
| 1484 | : GetInteger(src_map_start); | ||
| 1485 | const auto cur_end = | ||
| 1486 | src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); | ||
| 1487 | |||
| 1488 | // If we can, fix the protections on the block. | ||
| 1489 | if ((info.GetIpcLockCount() == 0 && | ||
| 1490 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || | ||
| 1491 | (info.GetIpcLockCount() != 0 && | ||
| 1492 | (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { | ||
| 1493 | // Check if we actually need to fix the protections on the block. | ||
| 1494 | if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || | ||
| 1495 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { | ||
| 1496 | ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(), | ||
| 1497 | OperationType::ChangePermissions) | ||
| 1498 | .IsSuccess()); | ||
| 1499 | } | ||
| 1500 | } | ||
| 1501 | |||
| 1502 | // If we're past the end of the region, we're done. | ||
| 1503 | if (src_map_last <= info.GetLastAddress()) { | ||
| 1504 | break; | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | // Advance. | ||
| 1508 | ++it; | ||
| 1509 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1510 | } | ||
| 1511 | } | ||
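Editor's note: the cur_start/cur_end computation above is just an interval intersection: each block's [address, end) range is clamped to the mapped [src_map_start, src_map_end) range before its permissions are fixed, and the same pattern appears in SetupForIpcClient. Stated directly:

#include <algorithm>
#include <cstdint>

struct Range {
    std::uint64_t start;
    std::uint64_t end;  // exclusive
};

// Intersect two half-open ranges; callers must check start < end afterwards,
// since disjoint inputs produce an empty (inverted) result.
inline Range Intersect(Range a, Range b) {
    return Range{std::max(a.start, b.start), std::min(a.end, b.end)};
}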
| 1512 | |||
| 1513 | Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 1514 | // Lock the physical memory lock. | ||
| 1515 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 1516 | |||
| 1517 | // Calculate the last address for convenience. | ||
| 1518 | const KProcessAddress last_address = address + size - 1; | ||
| 1519 | |||
| 1520 | // Define iteration variables. | ||
| 1521 | KProcessAddress cur_address; | ||
| 1522 | size_t mapped_size; | ||
| 1523 | |||
| 1524 | // The entire mapping process can be retried. | ||
| 1525 | while (true) { | ||
| 1526 | // Check if the memory is already mapped. | ||
| 1527 | { | ||
| 1528 | // Lock the table. | ||
| 1529 | KScopedLightLock lk(m_general_lock); | ||
| 1530 | |||
| 1531 | // Iterate over the memory. | ||
| 1532 | cur_address = address; | ||
| 1533 | mapped_size = 0; | ||
| 1534 | |||
| 1535 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1536 | while (true) { | ||
| 1537 | // Check that the iterator is valid. | ||
| 1538 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1539 | |||
| 1540 | // Get the memory info. | ||
| 1541 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1542 | |||
| 1543 | // Check if we're done. | ||
| 1544 | if (last_address <= info.GetLastAddress()) { | ||
| 1545 | if (info.GetState() != KMemoryState::Free) { | ||
| 1546 | mapped_size += (last_address + 1 - cur_address); | ||
| 1547 | } | ||
| 1548 | break; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | // Track the memory if it's mapped. | ||
| 1552 | if (info.GetState() != KMemoryState::Free) { | ||
| 1553 | mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 1554 | } | ||
| 1555 | |||
| 1556 | // Advance. | ||
| 1557 | cur_address = info.GetEndAddress(); | ||
| 1558 | ++it; | ||
| 1559 | } | ||
| 1560 | |||
| 1561 | // If the size mapped is the size requested, we've nothing to do. | ||
| 1562 | R_SUCCEED_IF(size == mapped_size); | ||
| 1563 | } | ||
| 1564 | |||
| 1565 | // Allocate and map the memory. | ||
| 1566 | { | ||
| 1567 | // Reserve the memory from the process resource limit. | ||
| 1568 | KScopedResourceReservation memory_reservation( | ||
| 1569 | m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size); | ||
| 1570 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 1571 | |||
| 1572 | // Allocate pages for the new memory. | ||
| 1573 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 1574 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( | ||
| 1575 | &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); | ||
| 1576 | |||
| 1577 | // If we fail in the next bit (or retry), we need to clean up the pages. | ||
| 1578 | // auto pg_guard = SCOPE_GUARD { | ||
| 1579 | // pg.OpenFirst(); | ||
| 1580 | // pg.Close(); | ||
| 1581 | //}; | ||
| 1582 | |||
| 1583 | // Map the memory. | ||
| 1584 | { | ||
| 1585 | // Lock the table. | ||
| 1586 | KScopedLightLock lk(m_general_lock); | ||
| 1587 | |||
| 1588 | size_t num_allocator_blocks = 0; | ||
| 1589 | |||
| 1590 | // Verify that nobody has mapped memory since we first checked. | ||
| 1591 | { | ||
| 1592 | // Iterate over the memory. | ||
| 1593 | size_t checked_mapped_size = 0; | ||
| 1594 | cur_address = address; | ||
| 1595 | |||
| 1596 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1597 | while (true) { | ||
| 1598 | // Check that the iterator is valid. | ||
| 1599 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1600 | |||
| 1601 | // Get the memory info. | ||
| 1602 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1603 | |||
| 1604 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 1605 | if (is_free) { | ||
| 1606 | if (info.GetAddress() < GetInteger(address)) { | ||
| 1607 | ++num_allocator_blocks; | ||
| 1608 | } | ||
| 1609 | if (last_address < info.GetLastAddress()) { | ||
| 1610 | ++num_allocator_blocks; | ||
| 1611 | } | ||
| 1612 | } | ||
| 1613 | |||
| 1614 | // Check if we're done. | ||
| 1615 | if (last_address <= info.GetLastAddress()) { | ||
| 1616 | if (!is_free) { | ||
| 1617 | checked_mapped_size += (last_address + 1 - cur_address); | ||
| 1618 | } | ||
| 1619 | break; | ||
| 1620 | } | ||
| 1621 | |||
| 1622 | // Track the memory if it's mapped. | ||
| 1623 | if (!is_free) { | ||
| 1624 | checked_mapped_size += | ||
| 1625 | KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 1626 | } | ||
| 1627 | |||
| 1628 | // Advance. | ||
| 1629 | cur_address = info.GetEndAddress(); | ||
| 1630 | ++it; | ||
| 1631 | } | ||
| 1632 | |||
| 1633 | // If the size now isn't what it was before, somebody mapped or unmapped | ||
| 1634 | // concurrently. If this happened, retry. | ||
| 1635 | if (mapped_size != checked_mapped_size) { | ||
| 1636 | continue; | ||
| 1637 | } | ||
| 1638 | } | ||
| 1639 | |||
| 1640 | // Create an update allocator. | ||
| 1641 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 1642 | Result allocator_result; | ||
| 1643 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1644 | m_memory_block_slab_manager, | ||
| 1645 | num_allocator_blocks); | ||
| 1646 | R_TRY(allocator_result); | ||
| 1647 | |||
| 1648 | // We're going to perform an update, so create a helper. | ||
| 1649 | KScopedPageTableUpdater updater(this); | ||
| 1650 | |||
| 1651 | // Prepare to iterate over the memory. | ||
| 1652 | auto pg_it = pg.begin(); | ||
| 1653 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 1654 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1655 | |||
| 1656 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 1657 | // pg_guard.Cancel(); | ||
| 1658 | cur_address = address; | ||
| 1659 | ON_RESULT_FAILURE { | ||
| 1660 | if (cur_address > address) { | ||
| 1661 | const KProcessAddress last_unmap_address = cur_address - 1; | ||
| 1662 | |||
| 1663 | // Iterate, unmapping the pages. | ||
| 1664 | cur_address = address; | ||
| 1665 | |||
| 1666 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1667 | while (true) { | ||
| 1668 | // Check that the iterator is valid. | ||
| 1669 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1670 | |||
| 1671 | // Get the memory info. | ||
| 1672 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1673 | |||
| 1674 | // If the memory state is free, we mapped it and need to unmap it. | ||
| 1675 | if (info.GetState() == KMemoryState::Free) { | ||
| 1676 | // Determine the range to unmap. | ||
| 1677 | const size_t cur_pages = | ||
| 1678 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 1679 | last_unmap_address + 1 - cur_address) / | ||
| 1680 | PageSize; | ||
| 1681 | |||
| 1682 | // Unmap. | ||
| 1683 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | ||
| 1684 | OperationType::Unmap) | ||
| 1685 | .IsSuccess()); | ||
| 1686 | } | ||
| 1687 | |||
| 1688 | // Check if we're done. | ||
| 1689 | if (last_unmap_address <= info.GetLastAddress()) { | ||
| 1690 | break; | ||
| 1691 | } | ||
| 1692 | |||
| 1693 | // Advance. | ||
| 1694 | cur_address = info.GetEndAddress(); | ||
| 1695 | ++it; | ||
| 1696 | } | ||
| 1697 | } | ||
| 1698 | |||
| 1699 | // Release any remaining unmapped memory. | ||
| 1700 | m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); | ||
| 1701 | m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); | ||
| 1702 | for (++pg_it; pg_it != pg.end(); ++pg_it) { | ||
| 1703 | m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 1704 | pg_it->GetNumPages()); | ||
| 1705 | m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), | ||
| 1706 | pg_it->GetNumPages()); | ||
| 1707 | } | ||
| 1708 | }; | ||
| 1709 | |||
| 1710 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1711 | while (true) { | ||
| 1712 | // Check that the iterator is valid. | ||
| 1713 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1714 | |||
| 1715 | // Get the memory info. | ||
| 1716 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1717 | |||
| 1718 | // If it's unmapped, we need to map it. | ||
| 1719 | if (info.GetState() == KMemoryState::Free) { | ||
| 1720 | // Determine the range to map. | ||
| 1721 | size_t map_pages = | ||
| 1722 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 1723 | last_address + 1 - cur_address) / | ||
| 1724 | PageSize; | ||
| 1725 | |||
| 1726 | // While we have pages to map, map them. | ||
| 1727 | { | ||
| 1728 | // Create a page group for the current mapping range. | ||
| 1729 | KPageGroup cur_pg(m_kernel, m_block_info_manager); | ||
| 1730 | { | ||
| 1731 | ON_RESULT_FAILURE_2 { | ||
| 1732 | cur_pg.OpenFirst(); | ||
| 1733 | cur_pg.Close(); | ||
| 1734 | }; | ||
| 1735 | |||
| 1736 | size_t remain_pages = map_pages; | ||
| 1737 | while (remain_pages > 0) { | ||
| 1738 | // Check if we're at the end of the physical block. | ||
| 1739 | if (pg_pages == 0) { | ||
| 1740 | // Ensure there are more pages to map. | ||
| 1741 | ASSERT(pg_it != pg.end()); | ||
| 1742 | |||
| 1743 | // Advance our physical block. | ||
| 1744 | ++pg_it; | ||
| 1745 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1746 | pg_pages = pg_it->GetNumPages(); | ||
| 1747 | } | ||
| 1748 | |||
| 1749 | // Add whatever we can to the current block. | ||
| 1750 | const size_t cur_pages = std::min(pg_pages, remain_pages); | ||
| 1751 | R_TRY(cur_pg.AddBlock(pg_phys_addr + | ||
| 1752 | ((pg_pages - cur_pages) * PageSize), | ||
| 1753 | cur_pages)); | ||
| 1754 | |||
| 1755 | // Advance. | ||
| 1756 | remain_pages -= cur_pages; | ||
| 1757 | pg_pages -= cur_pages; | ||
| 1758 | } | ||
| 1759 | } | ||
| 1760 | |||
| 1761 | // Map the pages. | ||
| 1762 | R_TRY(this->Operate(cur_address, map_pages, cur_pg, | ||
| 1763 | OperationType::MapFirstGroup)); | ||
| 1764 | } | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | // Check if we're done. | ||
| 1768 | if (last_address <= info.GetLastAddress()) { | ||
| 1769 | break; | ||
| 1770 | } | ||
| 1771 | |||
| 1772 | // Advance. | ||
| 1773 | cur_address = info.GetEndAddress(); | ||
| 1774 | ++it; | ||
| 1775 | } | ||
| 1776 | |||
| 1777 | // We succeeded, so commit the memory reservation. | ||
| 1778 | memory_reservation.Commit(); | ||
| 1779 | |||
| 1780 | // Increase our tracked mapped size. | ||
| 1781 | m_mapped_physical_memory_size += (size - mapped_size); | ||
| 1782 | |||
| 1783 | // Update the relevant memory blocks. | ||
| 1784 | m_memory_block_manager.UpdateIfMatch( | ||
| 1785 | std::addressof(allocator), address, size / PageSize, KMemoryState::Free, | ||
| 1786 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | ||
| 1787 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1788 | address == this->GetAliasRegionStart() | ||
| 1789 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 1790 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 1791 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1792 | |||
| 1793 | R_SUCCEED(); | ||
| 1794 | } | ||
| 1795 | } | ||
| 1796 | } | ||
| 1797 | } | ||
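Editor's note: structurally, MapPhysicalMemory is an optimistic retry loop: measure the already-mapped size under the table lock, drop the lock while allocating, then re-measure under the lock and start over if another thread changed the mapping in between (the `continue` above). A minimal generic sketch of the pattern, with illustrative names and std::mutex standing in for the kernel lock:

#include <cstddef>
#include <mutex>

template <typename Measure, typename Allocate, typename Commit, typename Release>
bool MapWithRetry(std::mutex& table_lock, Measure measure, Allocate allocate, Commit commit,
                  Release release) {
    while (true) {
        std::size_t mapped_size;
        {
            std::lock_guard lk(table_lock);
            mapped_size = measure();
        }
        auto allocation = allocate(mapped_size);  // potentially slow; done outside the lock
        std::lock_guard lk(table_lock);
        if (measure() != mapped_size) {
            // Somebody mapped or unmapped concurrently; drop the allocation and retry.
            release(std::move(allocation));
            continue;
        }
        commit(std::move(allocation));
        return true;
    }
}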
| 1798 | |||
| 1799 | Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 1800 | // Lock the physical memory lock. | ||
| 1801 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 1802 | |||
| 1803 | // Lock the table. | ||
| 1804 | KScopedLightLock lk(m_general_lock); | ||
| 1805 | |||
| 1806 | // Calculate the last address for convenience. | ||
| 1807 | const KProcessAddress last_address = address + size - 1; | ||
| 1808 | |||
| 1809 | // Define iteration variables. | ||
| 1810 | KProcessAddress map_start_address = 0; | ||
| 1811 | KProcessAddress map_last_address = 0; | ||
| 1812 | |||
| 1813 | KProcessAddress cur_address; | ||
| 1814 | size_t mapped_size; | ||
| 1815 | size_t num_allocator_blocks = 0; | ||
| 1816 | |||
| 1817 | // Check if the memory is mapped. | ||
| 1818 | { | ||
| 1819 | // Iterate over the memory. | ||
| 1820 | cur_address = address; | ||
| 1821 | mapped_size = 0; | ||
| 1822 | |||
| 1823 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1824 | while (true) { | ||
| 1825 | // Check that the iterator is valid. | ||
| 1826 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1827 | |||
| 1828 | // Get the memory info. | ||
| 1829 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1830 | |||
| 1831 | // Verify the memory's state. | ||
| 1832 | const bool is_normal = info.GetState() == KMemoryState::Normal && | ||
| 1833 | info.GetAttribute() == KMemoryAttribute::None; | ||
| 1834 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 1835 | R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory); | ||
| 1836 | |||
| 1837 | if (is_normal) { | ||
| 1838 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | ||
| 1839 | |||
| 1840 | if (map_start_address == 0) { | ||
| 1841 | map_start_address = cur_address; | ||
| 1842 | } | ||
| 1843 | map_last_address = | ||
| 1844 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 1845 | |||
| 1846 | if (info.GetAddress() < GetInteger(address)) { | ||
| 1847 | ++num_allocator_blocks; | ||
| 1848 | } | ||
| 1849 | if (last_address < info.GetLastAddress()) { | ||
| 1850 | ++num_allocator_blocks; | ||
| 1851 | } | ||
| 1852 | |||
| 1853 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | // Check if we're done. | ||
| 1857 | if (last_address <= info.GetLastAddress()) { | ||
| 1858 | break; | ||
| 1859 | } | ||
| 1860 | |||
| 1861 | // Advance. | ||
| 1862 | cur_address = info.GetEndAddress(); | ||
| 1863 | ++it; | ||
| 1864 | } | ||
| 1865 | |||
| 1866 | // If there's nothing mapped, we've nothing to do. | ||
| 1867 | R_SUCCEED_IF(mapped_size == 0); | ||
| 1868 | } | ||
| 1869 | |||
| 1870 | // Create an update allocator. | ||
| 1871 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 1872 | Result allocator_result; | ||
| 1873 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1874 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1875 | R_TRY(allocator_result); | ||
| 1876 | |||
| 1877 | // We're going to perform an update, so create a helper. | ||
| 1878 | KScopedPageTableUpdater updater(this); | ||
| 1879 | |||
| 1880 | // Separate the mapping. | ||
| 1881 | R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize, | ||
| 1882 | KMemoryPermission::None, OperationType::Separate)); | ||
| 1883 | |||
| 1884 | // Reset the current tracking address. | ||
| 1885 | cur_address = address; | ||
| 1886 | |||
| 1887 | // Iterate over the memory, unmapping as we go. | ||
| 1888 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1889 | |||
| 1890 | const auto clear_merge_attr = | ||
| 1891 | (it->GetState() == KMemoryState::Normal && | ||
| 1892 | it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) | ||
| 1893 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 1894 | : KMemoryBlockDisableMergeAttribute::None; | ||
| 1895 | |||
| 1896 | while (true) { | ||
| 1897 | // Check that the iterator is valid. | ||
| 1898 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1899 | |||
| 1900 | // Get the memory info. | ||
| 1901 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1902 | |||
| 1903 | // If the memory state is normal, we need to unmap it. | ||
| 1904 | if (info.GetState() == KMemoryState::Normal) { | ||
| 1905 | // Determine the range to unmap. | ||
| 1906 | const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 1907 | last_address + 1 - cur_address) / | ||
| 1908 | PageSize; | ||
| 1909 | |||
| 1910 | // Unmap. | ||
| 1911 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) | ||
| 1912 | .IsSuccess()); | ||
| 1913 | } | ||
| 1914 | |||
| 1915 | // Check if we're done. | ||
| 1916 | if (last_address <= info.GetLastAddress()) { | ||
| 1917 | break; | ||
| 1918 | } | ||
| 1919 | |||
| 1920 | // Advance. | ||
| 1921 | cur_address = info.GetEndAddress(); | ||
| 1922 | ++it; | ||
| 1923 | } | ||
| 1924 | |||
| 1925 | // Release the memory resource. | ||
| 1926 | m_mapped_physical_memory_size -= mapped_size; | ||
| 1927 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size); | ||
| 1928 | |||
| 1929 | // Update memory blocks. | ||
| 1930 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | ||
| 1931 | KMemoryState::Free, KMemoryPermission::None, | ||
| 1932 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1933 | clear_merge_attr); | ||
| 1934 | |||
| 1935 | // We succeeded. | ||
| 1936 | R_SUCCEED(); | ||
| 1937 | } | ||
| 1938 | |||
| 1939 | Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1940 | size_t size) { | ||
| 1941 | // Lock the table. | ||
| 1942 | KScopedLightLock lk(m_general_lock); | ||
| 1943 | |||
| 1944 | // Validate that the source address's state is valid. | ||
| 1945 | KMemoryState src_state; | ||
| 1946 | size_t num_src_allocator_blocks; | ||
| 1947 | R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, | ||
| 1948 | std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 1949 | KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 1950 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 1951 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1952 | |||
| 1953 | // Validate that the dst address's state is valid. | ||
| 1954 | size_t num_dst_allocator_blocks; | ||
| 1955 | R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, | ||
| 1956 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 1957 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1958 | KMemoryAttribute::None)); | ||
| 1959 | |||
| 1960 | // Create an update allocator for the source. | ||
| 1961 | Result src_allocator_result; | ||
| 1962 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1963 | m_memory_block_slab_manager, | ||
| 1964 | num_src_allocator_blocks); | ||
| 1965 | R_TRY(src_allocator_result); | ||
| 1966 | |||
| 1967 | // Create an update allocator for the destination. | ||
| 1968 | Result dst_allocator_result; | ||
| 1969 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1970 | m_memory_block_slab_manager, | ||
| 1971 | num_dst_allocator_blocks); | ||
| 1972 | R_TRY(dst_allocator_result); | ||
| 1973 | |||
| 1974 | // Map the memory. | ||
| 1975 | { | ||
| 1976 | // Determine the number of pages being operated on. | ||
| 1977 | const size_t num_pages = size / PageSize; | ||
| 1978 | |||
| 1979 | // Create a page group for the memory being mapped. | ||
| 1980 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 1981 | |||
| 1982 | // Create the page group representing the source. | ||
| 1983 | R_TRY(this->MakePageGroup(pg, src_address, num_pages)); | ||
| 1984 | |||
| 1985 | // We're going to perform an update, so create a helper. | ||
| 1986 | KScopedPageTableUpdater updater(this); | ||
| 1987 | |||
| 1988 | // Reprotect the source as kernel-read/not mapped. | ||
| 1989 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | ||
| 1990 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||
| 1991 | const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | ||
| 1992 | const KPageProperties src_properties = {new_src_perm, false, false, | ||
| 1993 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 1994 | R_TRY(this->Operate(src_address, num_pages, src_properties.perm, | ||
| 1995 | OperationType::ChangePermissions)); | ||
| 1996 | |||
| 1997 | // Ensure that we unprotect the source pages on failure. | ||
| 1998 | ON_RESULT_FAILURE { | ||
| 1999 | const KPageProperties unprotect_properties = { | ||
| 2000 | KMemoryPermission::UserReadWrite, false, false, | ||
| 2001 | DisableMergeAttribute::EnableHeadBodyTail}; | ||
| 2002 | ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm, | ||
| 2003 | OperationType::ChangePermissions) == ResultSuccess); | ||
| 2004 | }; | ||
| 2005 | |||
| 2006 | // Map the alias pages. | ||
| 2007 | const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 2008 | DisableMergeAttribute::DisableHead}; | ||
| 2009 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, | ||
| 2010 | false)); | ||
| 2011 | |||
| 2012 | // Apply the memory block updates. | ||
| 2013 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 2014 | src_state, new_src_perm, new_src_attr, | ||
| 2015 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 2016 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2017 | m_memory_block_manager.Update( | ||
| 2018 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack, | ||
| 2019 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2020 | KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); | ||
| 2021 | } | ||
| 2022 | |||
| 2023 | R_SUCCEED(); | ||
| 2024 | } | ||
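MapMemory relies on ON_RESULT_FAILURE to restore the source permissions if mapping the alias fails. A minimal approximation of that rollback idiom as a plain scope guard (assumed semantics only; yuzu's macro ties into the Result plumbing rather than an explicit Disarm() call):

    #include <utility>

    template <typename F>
    class ScopeFailureGuard {
    public:
        explicit ScopeFailureGuard(F f) : m_f(std::move(f)) {}
        ~ScopeFailureGuard() {
            if (m_armed) {
                m_f(); // run the rollback, e.g. restore source permissions
            }
        }
        void Disarm() {
            m_armed = false; // call once the operation has succeeded
        }
    private:
        F m_f;
        bool m_armed = true;
    };

    // Usage sketch:
    //   ScopeFailureGuard guard([&] { /* restore source permissions */ });
    //   ... operations that may return early on failure ...
    //   guard.Disarm(); // reached only on success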
| 2025 | |||
| 2026 | Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 2027 | size_t size) { | ||
| 2028 | // Lock the table. | ||
| 2029 | KScopedLightLock lk(m_general_lock); | ||
| 2030 | |||
| 2031 | // Validate that the source address's state is valid. | ||
| 2032 | KMemoryState src_state; | ||
| 2033 | size_t num_src_allocator_blocks; | ||
| 2034 | R_TRY(this->CheckMemoryState( | ||
| 2035 | std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), | ||
| 2036 | src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 2037 | KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, | ||
| 2038 | KMemoryAttribute::All, KMemoryAttribute::Locked)); | ||
| 2039 | |||
| 2040 | // Validate that the dst address's state is valid. | ||
| 2041 | KMemoryPermission dst_perm; | ||
| 2042 | size_t num_dst_allocator_blocks; | ||
| 2043 | R_TRY(this->CheckMemoryState( | ||
| 2044 | nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), | ||
| 2045 | dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, | ||
| 2046 | KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2047 | |||
| 2048 | // Create an update allocator for the source. | ||
| 2049 | Result src_allocator_result; | ||
| 2050 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 2051 | m_memory_block_slab_manager, | ||
| 2052 | num_src_allocator_blocks); | ||
| 2053 | R_TRY(src_allocator_result); | ||
| 2054 | |||
| 2055 | // Create an update allocator for the destination. | ||
| 2056 | Result dst_allocator_result; | ||
| 2057 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 2058 | m_memory_block_slab_manager, | ||
| 2059 | num_dst_allocator_blocks); | ||
| 2060 | R_TRY(dst_allocator_result); | ||
| 2061 | |||
| 2062 | // Unmap the memory. | ||
| 2063 | { | ||
| 2064 | // Determine the number of pages being operated on. | ||
| 2065 | const size_t num_pages = size / PageSize; | ||
| 2066 | |||
| 2067 | // Create page groups for the memory being unmapped. | ||
| 2068 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 2069 | |||
| 2070 | // Create the page group representing the destination. | ||
| 2071 | R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); | ||
| 2072 | |||
| 2073 | // Ensure the page group is valid for the source. | ||
| 2074 | R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); | ||
| 2075 | |||
| 2076 | // We're going to perform an update, so create a helper. | ||
| 2077 | KScopedPageTableUpdater updater(this); | ||
| 2078 | |||
| 2079 | // Unmap the aliased copy of the pages. | ||
| 2080 | const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2081 | DisableMergeAttribute::None}; | ||
| 2082 | R_TRY( | ||
| 2083 | this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap)); | ||
| 2084 | |||
| 2085 | // Ensure that we re-map the aliased pages on failure. | ||
| 2086 | ON_RESULT_FAILURE { | ||
| 2087 | this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); | ||
| 2088 | }; | ||
| 2089 | |||
| 2090 | // Try to set the permissions for the source pages back to what they should be. | ||
| 2091 | const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 2092 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 2093 | R_TRY(this->Operate(src_address, num_pages, src_properties.perm, | ||
| 2094 | OperationType::ChangePermissions)); | ||
| 2095 | |||
| 2096 | // Apply the memory block updates. | ||
| 2097 | m_memory_block_manager.Update( | ||
| 2098 | std::addressof(src_allocator), src_address, num_pages, src_state, | ||
| 2099 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2100 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 2101 | m_memory_block_manager.Update( | ||
| 2102 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 2103 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2104 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2105 | } | ||
| 2106 | |||
| 2107 | R_SUCCEED(); | ||
| 2108 | } | ||
| 2109 | |||
| 2110 | Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 2111 | size_t num_pages, KMemoryPermission perm) { | ||
| 2112 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2113 | |||
| 2114 | // Create a page group to hold the pages we allocate. | ||
| 2115 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 2116 | |||
| 2117 | // Allocate the pages. | ||
| 2118 | R_TRY( | ||
| 2119 | m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option)); | ||
| 2120 | |||
| 2121 | // Ensure that the page group is closed when we're done working with it. | ||
| 2122 | SCOPE_EXIT({ pg.Close(); }); | ||
| 2123 | |||
| 2124 | // Clear all pages. | ||
| 2125 | for (const auto& it : pg) { | ||
| 2126 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, | ||
| 2127 | it.GetSize()); | ||
| 2128 | } | ||
| 2129 | |||
| 2130 | // Map the pages. | ||
| 2131 | R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup)); | ||
| 2132 | } | ||
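AllocateAndMapPagesImpl clears every freshly allocated block with m_heap_fill_value before mapping it, so the guest never observes stale data. A small sketch of that fill step over host pointers (the Block type is hypothetical):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Block {
        void* host_ptr; // host pointer backing the physical block
        size_t size;    // size in bytes, a multiple of the page size
    };

    void FillBlocks(const std::vector<Block>& blocks, unsigned char fill) {
        for (const Block& b : blocks) {
            std::memset(b.host_ptr, fill, b.size); // mirrors the m_heap_fill_value memset
        }
    }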
| 2133 | |||
| 2134 | Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 2135 | const KPageGroup& pg, const KPageProperties properties, | ||
| 2136 | bool reuse_ll) { | ||
| 2137 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2138 | |||
| 2139 | // Note the current address, so that we can iterate. | ||
| 2140 | const KProcessAddress start_address = address; | ||
| 2141 | KProcessAddress cur_address = address; | ||
| 2142 | |||
| 2143 | // Ensure that we clean up on failure. | ||
| 2144 | ON_RESULT_FAILURE { | ||
| 2145 | ASSERT(!reuse_ll); | ||
| 2146 | if (cur_address != start_address) { | ||
| 2147 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2148 | DisableMergeAttribute::None}; | ||
| 2149 | ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize, | ||
| 2150 | unmap_properties.perm, OperationType::Unmap) == ResultSuccess); | ||
| 2151 | } | ||
| 2152 | }; | ||
| 2153 | |||
| 2154 | // Iterate, mapping all pages in the group. | ||
| 2155 | for (const auto& block : pg) { | ||
| 2156 | // Map and advance. | ||
| 2157 | const KPageProperties cur_properties = | ||
| 2158 | (cur_address == start_address) | ||
| 2159 | ? properties | ||
| 2160 | : KPageProperties{properties.perm, properties.io, properties.uncached, | ||
| 2161 | DisableMergeAttribute::None}; | ||
| 2162 | R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, | ||
| 2163 | OperationType::Map, block.GetAddress())); | ||
| 2164 | cur_address += block.GetSize(); | ||
| 2165 | } | ||
| 2166 | |||
| 2167 | // We succeeded! | ||
| 2168 | R_SUCCEED(); | ||
| 2169 | } | ||
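Only the first block mapped by MapPageGroupImpl keeps the caller's DisableMergeAttribute; every later block is mapped with DisableMergeAttribute::None. A tiny sketch of that head-only selection (illustrative types):

    #include <cstddef>
    #include <vector>

    enum class Merge { None, DisableHead };

    struct Props {
        int perm;
        Merge merge;
    };

    std::vector<Props> PerBlockProps(const Props& head, size_t num_blocks) {
        // Every block after the head maps with a neutral merge attribute.
        std::vector<Props> out(num_blocks, Props{head.perm, Merge::None});
        if (!out.empty()) {
            out.front() = head; // the head alone carries DisableHead
        }
        return out;
    }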
| 2170 | |||
| 2171 | void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 2172 | const KPageGroup& pg) { | ||
| 2173 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2174 | |||
| 2175 | // Note the current address, so that we can iterate. | ||
| 2176 | const KProcessAddress start_address = address; | ||
| 2177 | const KProcessAddress last_address = start_address + size - 1; | ||
| 2178 | const KProcessAddress end_address = last_address + 1; | ||
| 2179 | |||
| 2180 | // Iterate over the memory. | ||
| 2181 | auto pg_it = pg.begin(); | ||
| 2182 | ASSERT(pg_it != pg.end()); | ||
| 2183 | |||
| 2184 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 2185 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 2186 | |||
| 2187 | auto it = m_memory_block_manager.FindIterator(start_address); | ||
| 2188 | while (true) { | ||
| 2189 | // Check that the iterator is valid. | ||
| 2190 | ASSERT(it != m_memory_block_manager.end()); | ||
| 2191 | |||
| 2192 | // Get the memory info. | ||
| 2193 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 2194 | |||
| 2195 | // Determine the range to map. | ||
| 2196 | KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address); | ||
| 2197 | const KProcessAddress map_end_address = | ||
| 2198 | std::min<KProcessAddress>(info.GetEndAddress(), end_address); | ||
| 2199 | ASSERT(map_end_address != map_address); | ||
| 2200 | |||
| 2201 | // Determine if we should disable head merge. | ||
| 2202 | const bool disable_head_merge = | ||
| 2203 | info.GetAddress() >= GetInteger(start_address) && | ||
| 2204 | True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2205 | const KPageProperties map_properties = { | ||
| 2206 | info.GetPermission(), false, false, | ||
| 2207 | disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None}; | ||
| 2208 | |||
| 2209 | // While we have pages to map, map them. | ||
| 2210 | size_t map_pages = (map_end_address - map_address) / PageSize; | ||
| 2211 | while (map_pages > 0) { | ||
| 2212 | // Check if we're at the end of the physical block. | ||
| 2213 | if (pg_pages == 0) { | ||
| 2214 | // Ensure there are more pages to map. | ||
| 2215 | ASSERT(pg_it != pg.end()); | ||
| 2216 | |||
| 2217 | // Advance our physical block. | ||
| 2218 | ++pg_it; | ||
| 2219 | pg_phys_addr = pg_it->GetAddress(); | ||
| 2220 | pg_pages = pg_it->GetNumPages(); | ||
| 2221 | } | ||
| 2222 | |||
| 2223 | // Map whatever we can. | ||
| 2224 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 2225 | ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map, | ||
| 2226 | pg_phys_addr) == ResultSuccess); | ||
| 2227 | |||
| 2228 | // Advance. | ||
| 2229 | map_address += cur_pages * PageSize; | ||
| 2230 | map_pages -= cur_pages; | ||
| 2231 | |||
| 2232 | pg_phys_addr += cur_pages * PageSize; | ||
| 2233 | pg_pages -= cur_pages; | ||
| 2234 | } | ||
| 2235 | |||
| 2236 | // Check if we're done. | ||
| 2237 | if (last_address <= info.GetLastAddress()) { | ||
| 2238 | break; | ||
| 2239 | } | ||
| 2240 | |||
| 2241 | // Advance. | ||
| 2242 | ++it; | ||
| 2243 | } | ||
| 2244 | |||
| 2245 | // Check that we re-mapped precisely the page group. | ||
| 2246 | ASSERT((++pg_it) == pg.end()); | ||
| 2247 | } | ||
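RemapPageGroup advances two cursors at once: one over virtual extents, one over physical blocks, consuming min(remaining virtual, remaining physical) pages per step. A minimal sketch of that interleave, assuming both sequences have non-zero entries and sum to the same total page count (which the ASSERTs above enforce):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Returns the page counts of the individual map operations the
    // interleave would issue; virt and phys must sum to the same total.
    std::vector<size_t> Interleave(const std::vector<size_t>& virt,
                                   const std::vector<size_t>& phys) {
        std::vector<size_t> ops;
        size_t vi = 0, pi = 0, vleft = 0, pleft = 0;
        while (vi < virt.size() || vleft > 0) {
            if (vleft == 0) {
                vleft = virt[vi++]; // next virtual extent
            }
            if (pleft == 0) {
                pleft = phys[pi++]; // next physical block
            }
            const size_t n = std::min(vleft, pleft); // map whatever we can
            ops.push_back(n);
            vleft -= n;
            pleft -= n;
        }
        return ops;
    }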
| 2248 | |||
| 2249 | Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 2250 | KPhysicalAddress phys_addr, bool is_pa_valid, | ||
| 2251 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2252 | KMemoryState state, KMemoryPermission perm) { | ||
| 2253 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | ||
| 2254 | |||
| 2255 | // Ensure this is a valid map request. | ||
| 2256 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2257 | ResultInvalidCurrentMemory); | ||
| 2258 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2259 | |||
| 2260 | // Lock the table. | ||
| 2261 | KScopedLightLock lk(m_general_lock); | ||
| 2262 | |||
| 2263 | // Find a random address to map at. | ||
| 2264 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, | ||
| 2265 | 0, this->GetNumGuardPages()); | ||
| 2266 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2267 | ASSERT(Common::IsAligned(GetInteger(addr), alignment)); | ||
| 2268 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2269 | ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, | ||
| 2270 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2271 | KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess); | ||
| 2272 | |||
| 2273 | // Create an update allocator. | ||
| 2274 | Result allocator_result; | ||
| 2275 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2276 | m_memory_block_slab_manager); | ||
| 2277 | R_TRY(allocator_result); | ||
| 2278 | |||
| 2279 | // We're going to perform an update, so create a helper. | ||
| 2280 | KScopedPageTableUpdater updater(this); | ||
| 2281 | |||
| 2282 | // Perform mapping operation. | ||
| 2283 | if (is_pa_valid) { | ||
| 2284 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2285 | R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr)); | ||
| 2286 | } else { | ||
| 2287 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm)); | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | // Update the blocks. | ||
| 2291 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2292 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2293 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2294 | |||
| 2295 | // We successfully mapped the pages. | ||
| 2296 | *out_addr = addr; | ||
| 2297 | R_SUCCEED(); | ||
| 2298 | } | ||
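Both address-allocating map paths go through FindFreeArea. A first-fit sketch of that search over free extents, with guard pages and ASLR randomization omitted (the Free type is hypothetical):

    #include <cstdint>
    #include <vector>

    struct Free {
        uint64_t begin; // first free address
        uint64_t end;   // one past the last free address
    };

    uint64_t FindFree(const std::vector<Free>& holes, uint64_t bytes, uint64_t align) {
        for (const Free& h : holes) {
            // Round the hole's start up to the requested alignment.
            const uint64_t aligned = (h.begin + align - 1) / align * align;
            if (aligned + bytes <= h.end) {
                return aligned; // first hole that still fits after alignment
            }
        }
        return 0; // no space; mirrors the "addr != 0" check above
    }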
| 2299 | |||
| 2300 | Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 2301 | KMemoryPermission perm) { | ||
| 2302 | // Check that the map is in range. | ||
| 2303 | const size_t size = num_pages * PageSize; | ||
| 2304 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2305 | |||
| 2306 | // Lock the table. | ||
| 2307 | KScopedLightLock lk(m_general_lock); | ||
| 2308 | |||
| 2309 | // Check the memory state. | ||
| 2310 | size_t num_allocator_blocks; | ||
| 2311 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2312 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2313 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2314 | KMemoryAttribute::None)); | ||
| 2315 | |||
| 2316 | // Create an update allocator. | ||
| 2317 | Result allocator_result; | ||
| 2318 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2319 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2320 | R_TRY(allocator_result); | ||
| 2321 | |||
| 2322 | // We're going to perform an update, so create a helper. | ||
| 2323 | KScopedPageTableUpdater updater(this); | ||
| 2324 | |||
| 2325 | // Map the pages. | ||
| 2326 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm)); | ||
| 2327 | |||
| 2328 | // Update the blocks. | ||
| 2329 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, | ||
| 2330 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2331 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2332 | |||
| 2333 | R_SUCCEED(); | ||
| 2334 | } | ||
| 2335 | |||
| 2336 | Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { | ||
| 2337 | // Check that the unmap is in range. | ||
| 2338 | const size_t size = num_pages * PageSize; | ||
| 2339 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2340 | |||
| 2341 | // Lock the table. | ||
| 2342 | KScopedLightLock lk(m_general_lock); | ||
| 2343 | |||
| 2344 | // Check the memory state. | ||
| 2345 | size_t num_allocator_blocks; | ||
| 2346 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2347 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2348 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2349 | KMemoryAttribute::None)); | ||
| 2350 | |||
| 2351 | // Create an update allocator. | ||
| 2352 | Result allocator_result; | ||
| 2353 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2354 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2355 | R_TRY(allocator_result); | ||
| 2356 | |||
| 2357 | // We're going to perform an update, so create a helper. | ||
| 2358 | KScopedPageTableUpdater updater(this); | ||
| 2359 | |||
| 2360 | // Perform the unmap. | ||
| 2361 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2362 | DisableMergeAttribute::None}; | ||
| 2363 | R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap)); | ||
| 2364 | |||
| 2365 | // Update the blocks. | ||
| 2366 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2367 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2368 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2369 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2370 | |||
| 2371 | R_SUCCEED(); | ||
| 2372 | } | ||
| 2373 | |||
| 2374 | Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 2375 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2376 | KMemoryState state, KMemoryPermission perm) { | ||
| 2377 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2378 | |||
| 2379 | // Ensure this is a valid map request. | ||
| 2380 | const size_t num_pages = pg.GetNumPages(); | ||
| 2381 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2382 | ResultInvalidCurrentMemory); | ||
| 2383 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2384 | |||
| 2385 | // Lock the table. | ||
| 2386 | KScopedLightLock lk(m_general_lock); | ||
| 2387 | |||
| 2388 | // Find a random address to map at. | ||
| 2389 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, | ||
| 2390 | 0, this->GetNumGuardPages()); | ||
| 2391 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2392 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2393 | ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, | ||
| 2394 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2395 | KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess); | ||
| 2396 | |||
| 2397 | // Create an update allocator. | ||
| 2398 | Result allocator_result; | ||
| 2399 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2400 | m_memory_block_slab_manager); | ||
| 2401 | R_TRY(allocator_result); | ||
| 2402 | |||
| 2403 | // We're going to perform an update, so create a helper. | ||
| 2404 | KScopedPageTableUpdater updater(this); | ||
| 2405 | |||
| 2406 | // Perform mapping operation. | ||
| 2407 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2408 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2409 | |||
| 2410 | // Update the blocks. | ||
| 2411 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2412 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2413 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2414 | |||
| 2415 | // We successfully mapped the pages. | ||
| 2416 | *out_addr = addr; | ||
| 2417 | R_SUCCEED(); | ||
| 2418 | } | ||
| 2419 | |||
| 2420 | Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, | ||
| 2421 | KMemoryPermission perm) { | ||
| 2422 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2423 | |||
| 2424 | // Ensure this is a valid map request. | ||
| 2425 | const size_t num_pages = pg.GetNumPages(); | ||
| 2426 | const size_t size = num_pages * PageSize; | ||
| 2427 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | ||
| 2428 | |||
| 2429 | // Lock the table. | ||
| 2430 | KScopedLightLock lk(m_general_lock); | ||
| 2431 | |||
| 2432 | // Check if state allows us to map. | ||
| 2433 | size_t num_allocator_blocks; | ||
| 2434 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, | ||
| 2435 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2436 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2437 | KMemoryAttribute::None)); | ||
| 2438 | |||
| 2439 | // Create an update allocator. | ||
| 2440 | Result allocator_result; | ||
| 2441 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2442 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2443 | R_TRY(allocator_result); | ||
| 2444 | |||
| 2445 | // We're going to perform an update, so create a helper. | ||
| 2446 | KScopedPageTableUpdater updater(this); | ||
| 2447 | |||
| 2448 | // Perform mapping operation. | ||
| 2449 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2450 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2451 | |||
| 2452 | // Update the blocks. | ||
| 2453 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2454 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2455 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2456 | |||
| 2457 | // We successfully mapped the pages. | ||
| 2458 | R_SUCCEED(); | ||
| 2459 | } | ||
| 2460 | |||
| 2461 | Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, | ||
| 2462 | KMemoryState state) { | ||
| 2463 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2464 | |||
| 2465 | // Ensure this is a valid unmap request. | ||
| 2466 | const size_t num_pages = pg.GetNumPages(); | ||
| 2467 | const size_t size = num_pages * PageSize; | ||
| 2468 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2469 | |||
| 2470 | // Lock the table. | ||
| 2471 | KScopedLightLock lk(m_general_lock); | ||
| 2472 | |||
| 2473 | // Check if state allows us to unmap. | ||
| 2474 | size_t num_allocator_blocks; | ||
| 2475 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2476 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2477 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2478 | KMemoryAttribute::None)); | ||
| 2479 | |||
| 2480 | // Check that the page group is valid. | ||
| 2481 | R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory); | ||
| 2482 | |||
| 2483 | // Create an update allocator. | ||
| 2484 | Result allocator_result; | ||
| 2485 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2486 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2487 | R_TRY(allocator_result); | ||
| 2488 | |||
| 2489 | // We're going to perform an update, so create a helper. | ||
| 2490 | KScopedPageTableUpdater updater(this); | ||
| 2491 | |||
| 2492 | // Perform unmapping operation. | ||
| 2493 | const KPageProperties properties = {KMemoryPermission::None, false, false, | ||
| 2494 | DisableMergeAttribute::None}; | ||
| 2495 | R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap)); | ||
| 2496 | |||
| 2497 | // Update the blocks. | ||
| 2498 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2499 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2500 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2501 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2502 | |||
| 2503 | R_SUCCEED(); | ||
| 2504 | } | ||
| 2505 | |||
| 2506 | Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 2507 | KMemoryState state_mask, KMemoryState state, | ||
| 2508 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 2509 | KMemoryAttribute attr_mask, KMemoryAttribute attr) { | ||
| 2510 | // Ensure that the page group isn't null. | ||
| 2511 | ASSERT(out != nullptr); | ||
| 2512 | |||
| 2513 | // Make sure that the region we're mapping is valid for the table. | ||
| 2514 | const size_t size = num_pages * PageSize; | ||
| 2515 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2516 | |||
| 2517 | // Lock the table. | ||
| 2518 | KScopedLightLock lk(m_general_lock); | ||
| 2519 | |||
| 2520 | // Check if state allows us to create the group. | ||
| 2521 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 2522 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 2523 | attr_mask, attr)); | ||
| 2524 | |||
| 2525 | // Create a new page group for the region. | ||
| 2526 | R_TRY(this->MakePageGroup(*out, address, num_pages)); | ||
| 2527 | |||
| 2528 | R_SUCCEED(); | ||
| 2529 | } | ||
| 2530 | |||
| 2531 | Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 2532 | Svc::MemoryPermission svc_perm) { | ||
| 2533 | const size_t num_pages = size / PageSize; | ||
| 2534 | |||
| 2535 | // Lock the table. | ||
| 2536 | KScopedLightLock lk(m_general_lock); | ||
| 2537 | |||
| 2538 | // Verify we can change the memory permission. | ||
| 2539 | KMemoryState old_state; | ||
| 2540 | KMemoryPermission old_perm; | ||
| 2541 | size_t num_allocator_blocks; | ||
| 2542 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 2543 | std::addressof(num_allocator_blocks), addr, size, | ||
| 2544 | KMemoryState::FlagCode, KMemoryState::FlagCode, | ||
| 2545 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2546 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2547 | |||
| 2548 | // Determine new perm/state. | ||
| 2549 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 2550 | KMemoryState new_state = old_state; | ||
| 2551 | const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite; | ||
| 2552 | const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 2553 | const bool was_x = | ||
| 2554 | (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 2555 | ASSERT(!(is_w && is_x)); | ||
| 2556 | |||
| 2557 | if (is_w) { | ||
| 2558 | switch (old_state) { | ||
| 2559 | case KMemoryState::Code: | ||
| 2560 | new_state = KMemoryState::CodeData; | ||
| 2561 | break; | ||
| 2562 | case KMemoryState::AliasCode: | ||
| 2563 | new_state = KMemoryState::AliasCodeData; | ||
| 2564 | break; | ||
| 2565 | default: | ||
| 2566 | ASSERT(false); | ||
| 2567 | break; | ||
| 2568 | } | ||
| 2569 | } | ||
| 2570 | |||
| 2571 | // Succeed if there's nothing to do. | ||
| 2572 | R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); | ||
| 2573 | |||
| 2574 | // Create an update allocator. | ||
| 2575 | Result allocator_result{ResultSuccess}; | ||
| 2576 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2577 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2578 | R_TRY(allocator_result); | ||
| 2579 | |||
| 2580 | // Perform mapping operation. | ||
| 2581 | const auto operation = | ||
| 2582 | was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions; | ||
| 2583 | R_TRY(Operate(addr, num_pages, new_perm, operation)); | ||
| 2584 | |||
| 2585 | // Update the blocks. | ||
| 2586 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, | ||
| 2587 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 2588 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2589 | |||
| 2590 | // Ensure cache coherency, if we're setting pages as executable. | ||
| 2591 | if (is_x) { | ||
| 2592 | m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size); | ||
| 2593 | } | ||
| 2594 | |||
| 2595 | R_SUCCEED(); | ||
| 2596 | } | ||
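SetProcessMemoryPermission never allows write and execute at once (the ASSERT above), and granting write reclassifies code memory as data. A compact sketch of that transition table:

    #include <cassert>

    enum class MemState { Code, CodeData, AliasCode, AliasCodeData };

    // State after granting user write permission to a code region.
    MemState TransitionOnWrite(MemState s) {
        switch (s) {
        case MemState::Code:
            return MemState::CodeData;
        case MemState::AliasCode:
            return MemState::AliasCodeData;
        default:
            assert(false); // write is only granted to code states
            return s;
        }
    }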
| 2597 | |||
| 2598 | KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) { | ||
| 2599 | KScopedLightLock lk(m_general_lock); | ||
| 2600 | |||
| 2601 | return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo(); | ||
| 2602 | } | ||
| 2603 | |||
| 2604 | KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) { | ||
| 2605 | if (!Contains(addr, 1)) { | ||
| 2606 | return { | ||
| 2607 | .m_address = GetInteger(m_address_space_end), | ||
| 2608 | .m_size = 0 - GetInteger(m_address_space_end), | ||
| 2609 | .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), | ||
| 2610 | .m_device_disable_merge_left_count = 0, | ||
| 2611 | .m_device_disable_merge_right_count = 0, | ||
| 2612 | .m_ipc_lock_count = 0, | ||
| 2613 | .m_device_use_count = 0, | ||
| 2614 | .m_ipc_disable_merge_count = 0, | ||
| 2615 | .m_permission = KMemoryPermission::None, | ||
| 2616 | .m_attribute = KMemoryAttribute::None, | ||
| 2617 | .m_original_permission = KMemoryPermission::None, | ||
| 2618 | .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None, | ||
| 2619 | }; | ||
| 2620 | } | ||
| 2621 | |||
| 2622 | return QueryInfoImpl(addr); | ||
| 2623 | } | ||
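QueryInfo returns one block at a time, with an Inaccessible pseudo-block past the end of the address space. Callers conventionally walk the whole space by jumping from block end to block end; a hedged usage sketch with hypothetical Info and query types:

    #include <cstdint>
    #include <functional>

    struct Info {
        uint64_t address; // block base
        uint64_t size;    // block size in bytes
    };

    void WalkAddressSpace(uint64_t begin, uint64_t end,
                          const std::function<Info(uint64_t)>& query,
                          const std::function<void(const Info&)>& visit) {
        for (uint64_t addr = begin; addr < end;) {
            const Info info = query(addr); // one block per query
            visit(info);
            addr = info.address + info.size; // jump to the next block
        }
    }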
| 2624 | |||
| 2625 | Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size, | ||
| 2626 | Svc::MemoryPermission svc_perm) { | ||
| 2627 | const size_t num_pages = size / PageSize; | ||
| 2628 | |||
| 2629 | // Lock the table. | ||
| 2630 | KScopedLightLock lk(m_general_lock); | ||
| 2631 | |||
| 2632 | // Verify we can change the memory permission. | ||
| 2633 | KMemoryState old_state; | ||
| 2634 | KMemoryPermission old_perm; | ||
| 2635 | size_t num_allocator_blocks; | ||
| 2636 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 2637 | std::addressof(num_allocator_blocks), addr, size, | ||
| 2638 | KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, | ||
| 2639 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2640 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2641 | |||
| 2642 | // Determine new perm. | ||
| 2643 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 2644 | R_SUCCEED_IF(old_perm == new_perm); | ||
| 2645 | |||
| 2646 | // Create an update allocator. | ||
| 2647 | Result allocator_result{ResultSuccess}; | ||
| 2648 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2649 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2650 | R_TRY(allocator_result); | ||
| 2651 | |||
| 2652 | // Perform mapping operation. | ||
| 2653 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 2654 | |||
| 2655 | // Update the blocks. | ||
| 2656 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 2657 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 2658 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2659 | |||
| 2660 | R_SUCCEED(); | ||
| 2661 | } | ||
| 2662 | |||
| 2663 | Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) { | ||
| 2664 | const size_t num_pages = size / PageSize; | ||
| 2665 | ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == | ||
| 2666 | KMemoryAttribute::SetMask); | ||
| 2667 | |||
| 2668 | // Lock the table. | ||
| 2669 | KScopedLightLock lk(m_general_lock); | ||
| 2670 | |||
| 2671 | // Verify we can change the memory attribute. | ||
| 2672 | KMemoryState old_state; | ||
| 2673 | KMemoryPermission old_perm; | ||
| 2674 | KMemoryAttribute old_attr; | ||
| 2675 | size_t num_allocator_blocks; | ||
| 2676 | constexpr auto AttributeTestMask = | ||
| 2677 | ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared); | ||
| 2678 | const KMemoryState state_test_mask = | ||
| 2679 | static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached)) | ||
| 2680 | ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute) | ||
| 2681 | : 0) | | ||
| 2682 | ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked)) | ||
| 2683 | ? static_cast<u32>(KMemoryState::FlagCanPermissionLock) | ||
| 2684 | : 0)); | ||
| 2685 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 2686 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 2687 | addr, size, state_test_mask, state_test_mask, | ||
| 2688 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2689 | AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); | ||
| 2690 | |||
| 2691 | // Create an update allocator. | ||
| 2692 | Result allocator_result{ResultSuccess}; | ||
| 2693 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2694 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2695 | R_TRY(allocator_result); | ||
| 2696 | |||
| 2697 | // If we need to, perform a change attribute operation. | ||
| 2698 | if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) { | ||
| 2699 | // Perform operation. | ||
| 2700 | R_TRY(this->Operate(addr, num_pages, old_perm, | ||
| 2701 | OperationType::ChangePermissionsAndRefreshAndFlush, 0)); | ||
| 2702 | } | ||
| 2703 | |||
| 2704 | // Update the blocks. | ||
| 2705 | m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, | ||
| 2706 | static_cast<KMemoryAttribute>(mask), | ||
| 2707 | static_cast<KMemoryAttribute>(attr)); | ||
| 2708 | |||
| 2709 | R_SUCCEED(); | ||
| 2710 | } | ||
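The attribute update at the end is a masked bit replacement: bits selected by mask take their value from attr, and all other bits survive. A one-line sketch with a compile-time check:

    #include <cstdint>

    constexpr uint32_t UpdateAttribute(uint32_t old_attr, uint32_t mask, uint32_t attr) {
        return (old_attr & ~mask) | (attr & mask);
    }

    // Bits inside the mask are replaced; bits outside it are preserved.
    static_assert(UpdateAttribute(0b1010, 0b0110, 0b0100) == 0b1100);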
| 2711 | |||
| 2712 | Result KPageTable::SetMaxHeapSize(size_t size) { | ||
| 2713 | // Lock the table. | ||
| 2714 | KScopedLightLock lk(m_general_lock); | ||
| 2715 | |||
| 2716 | // Only process page tables are allowed to set heap size. | ||
| 2717 | ASSERT(!this->IsKernel()); | ||
| 2718 | |||
| 2719 | m_max_heap_size = size; | ||
| 2720 | |||
| 2721 | R_SUCCEED(); | ||
| 2722 | } | ||
| 2723 | |||
| 2724 | Result KPageTable::SetHeapSize(u64* out, size_t size) { | ||
| 2725 | // Lock the physical memory mutex. | ||
| 2726 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | ||
| 2727 | |||
| 2728 | // Try to perform a reduction in heap, instead of an extension. | ||
| 2729 | KProcessAddress cur_address{}; | ||
| 2730 | size_t allocation_size{}; | ||
| 2731 | { | ||
| 2732 | // Lock the table. | ||
| 2733 | KScopedLightLock lk(m_general_lock); | ||
| 2734 | |||
| 2735 | // Validate that setting heap size is possible at all. | ||
| 2736 | R_UNLESS(!m_is_kernel, ResultOutOfMemory); | ||
| 2737 | R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), | ||
| 2738 | ResultOutOfMemory); | ||
| 2739 | R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); | ||
| 2740 | |||
| 2741 | if (size < GetHeapSize()) { | ||
| 2742 | // The size being requested is less than the current size, so we need to free the end of | ||
| 2743 | // the heap. | ||
| 2744 | |||
| 2745 | // Validate memory state. | ||
| 2746 | size_t num_allocator_blocks; | ||
| 2747 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), | ||
| 2748 | m_heap_region_start + size, GetHeapSize() - size, | ||
| 2749 | KMemoryState::All, KMemoryState::Normal, | ||
| 2750 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 2751 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2752 | |||
| 2753 | // Create an update allocator. | ||
| 2754 | Result allocator_result{ResultSuccess}; | ||
| 2755 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2756 | m_memory_block_slab_manager, | ||
| 2757 | num_allocator_blocks); | ||
| 2758 | R_TRY(allocator_result); | ||
| 2759 | |||
| 2760 | // Unmap the end of the heap. | ||
| 2761 | const auto num_pages = (GetHeapSize() - size) / PageSize; | ||
| 2762 | R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None, | ||
| 2763 | OperationType::Unmap)); | ||
| 2764 | |||
| 2765 | // Release the memory from the resource limit. | ||
| 2766 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize); | ||
| 2767 | |||
| 2768 | // Apply the memory block update. | ||
| 2769 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | ||
| 2770 | num_pages, KMemoryState::Free, KMemoryPermission::None, | ||
| 2771 | KMemoryAttribute::None, | ||
| 2772 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2773 | size == 0 ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2774 | : KMemoryBlockDisableMergeAttribute::None); | ||
| 2775 | |||
| 2776 | // Update the current heap end. | ||
| 2777 | m_current_heap_end = m_heap_region_start + size; | ||
| 2778 | |||
| 2779 | // Set the output. | ||
| 2780 | *out = GetInteger(m_heap_region_start); | ||
| 2781 | R_SUCCEED(); | ||
| 2782 | } else if (size == GetHeapSize()) { | ||
| 2783 | // The size requested is exactly the current size. | ||
| 2784 | *out = GetInteger(m_heap_region_start); | ||
| 2785 | R_SUCCEED(); | ||
| 2786 | } else { | ||
| 2787 | // We have to allocate memory. Determine how much to allocate and where while the table | ||
| 2788 | // is locked. | ||
| 2789 | cur_address = m_current_heap_end; | ||
| 2790 | allocation_size = size - GetHeapSize(); | ||
| 2791 | } | ||
| 2792 | } | ||
| 2793 | |||
| 2794 | // Reserve memory for the heap extension. | ||
| 2795 | KScopedResourceReservation memory_reservation( | ||
| 2796 | m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size); | ||
| 2797 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 2798 | |||
| 2799 | // Allocate pages for the heap extension. | ||
| 2800 | KPageGroup pg{m_kernel, m_block_info_manager}; | ||
| 2801 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( | ||
| 2802 | &pg, allocation_size / PageSize, | ||
| 2803 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); | ||
| 2804 | |||
| 2805 | // Clear all the newly allocated pages. | ||
| 2806 | for (const auto& it : pg) { | ||
| 2807 | std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, | ||
| 2808 | it.GetSize()); | ||
| 2809 | } | ||
| 2810 | |||
| 2811 | // Map the pages. | ||
| 2812 | { | ||
| 2813 | // Lock the table. | ||
| 2814 | KScopedLightLock lk(m_general_lock); | ||
| 2815 | |||
| 2816 | // Ensure that the heap hasn't changed since we began executing. | ||
| 2817 | ASSERT(cur_address == m_current_heap_end); | ||
| 2818 | |||
| 2819 | // Check the memory state. | ||
| 2820 | size_t num_allocator_blocks{}; | ||
| 2821 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, | ||
| 2822 | allocation_size, KMemoryState::All, KMemoryState::Free, | ||
| 2823 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2824 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2825 | |||
| 2826 | // Create an update allocator. | ||
| 2827 | Result allocator_result{ResultSuccess}; | ||
| 2828 | KMemoryBlockManagerUpdateAllocator allocator( | ||
| 2829 | std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2830 | R_TRY(allocator_result); | ||
| 2831 | |||
| 2832 | // Map the pages. | ||
| 2833 | const auto num_pages = allocation_size / PageSize; | ||
| 2834 | R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup)); | ||
| 2835 | |||
| 2836 | // Clear all the newly allocated pages. | ||
| 2837 | for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) { | ||
| 2838 | std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0, | ||
| 2839 | PageSize); | ||
| 2840 | } | ||
| 2841 | |||
| 2842 | // We succeeded, so commit our memory reservation. | ||
| 2843 | memory_reservation.Commit(); | ||
| 2844 | |||
| 2845 | // Apply the memory block update. | ||
| 2846 | m_memory_block_manager.Update( | ||
| 2847 | std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, | ||
| 2848 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2849 | m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2850 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 2851 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2852 | |||
| 2853 | // Update the current heap end. | ||
| 2854 | m_current_heap_end = m_heap_region_start + size; | ||
| 2855 | |||
| 2856 | // Set the output. | ||
| 2857 | *out = GetInteger(m_heap_region_start); | ||
| 2858 | R_SUCCEED(); | ||
| 2859 | } | ||
| 2860 | } | ||
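SetHeapSize shrinks by unmapping (GetHeapSize() - size) / PageSize pages from the top of the heap while the base address stays fixed. A small worked check of that computation:

    #include <cstddef>

    // Pages released when shrinking from old_size to new_size (both page-aligned).
    constexpr size_t PagesFreed(size_t old_size, size_t new_size, size_t page_size) {
        return new_size < old_size ? (old_size - new_size) / page_size : 0;
    }

    // Shrinking a 32 KiB heap to 16 KiB with 4 KiB pages frees four pages.
    static_assert(PagesFreed(0x8000, 0x4000, 0x1000) == 4);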
| 2861 | |||
| 2862 | Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, | ||
| 2863 | size_t size, KMemoryPermission perm, | ||
| 2864 | bool is_aligned, bool check_heap) { | ||
| 2865 | // Lightly validate the range before doing anything else. | ||
| 2866 | const size_t num_pages = size / PageSize; | ||
| 2867 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2868 | |||
| 2869 | // Lock the table. | ||
| 2870 | KScopedLightLock lk(m_general_lock); | ||
| 2871 | |||
| 2872 | // Check the memory state. | ||
| 2873 | const auto test_state = | ||
| 2874 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | | ||
| 2875 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 2876 | size_t num_allocator_blocks; | ||
| 2877 | KMemoryState old_state; | ||
| 2878 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 2879 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 2880 | test_state, perm, perm, | ||
| 2881 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | ||
| 2882 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | ||
| 2883 | |||
| 2884 | // Create an update allocator. | ||
| 2885 | Result allocator_result; | ||
| 2886 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2887 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2888 | R_TRY(allocator_result); | ||
| 2889 | |||
| 2890 | // Update the memory blocks. | ||
| 2891 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 2892 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | ||
| 2893 | |||
| 2894 | // Set whether the locked memory was io. | ||
| 2895 | *out_is_io = | ||
| 2896 | static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io; | ||
| 2897 | |||
| 2898 | R_SUCCEED(); | ||
| 2899 | } | ||
| 2900 | |||
| 2901 | Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, | ||
| 2902 | bool check_heap) { | ||
| 2903 | // Lightly validate the range before doing anything else. | ||
| 2904 | const size_t num_pages = size / PageSize; | ||
| 2905 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2906 | |||
| 2907 | // Lock the table. | ||
| 2908 | KScopedLightLock lk(m_general_lock); | ||
| 2909 | |||
| 2910 | // Check the memory state. | ||
| 2911 | const auto test_state = KMemoryState::FlagCanDeviceMap | | ||
| 2912 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 2913 | size_t num_allocator_blocks; | ||
| 2914 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 2915 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, | ||
| 2916 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2917 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 2918 | |||
| 2919 | // Create an update allocator. | ||
| 2920 | Result allocator_result; | ||
| 2921 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2922 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2923 | R_TRY(allocator_result); | ||
| 2924 | |||
| 2925 | // Update the memory blocks. | ||
| 2926 | const KMemoryBlockManager::MemoryBlockLockFunction lock_func = | ||
| 2927 | m_enable_device_address_space_merge | ||
| 2928 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare | ||
| 2929 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; | ||
| 2930 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, | ||
| 2931 | KMemoryPermission::None); | ||
| 2932 | |||
| 2933 | R_SUCCEED(); | ||
| 2934 | } | ||
| 2935 | |||
| 2936 | Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { | ||
| 2937 | // Lightly validate the range before doing anything else. | ||
| 2938 | const size_t num_pages = size / PageSize; | ||
| 2939 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2940 | |||
| 2941 | // Lock the table. | ||
| 2942 | KScopedLightLock lk(m_general_lock); | ||
| 2943 | |||
| 2944 | // Check the memory state. | ||
| 2945 | size_t num_allocator_blocks; | ||
| 2946 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 2947 | std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, | ||
| 2948 | KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, | ||
| 2949 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 2950 | |||
| 2951 | // Create an update allocator. | ||
| 2952 | Result allocator_result{ResultSuccess}; | ||
| 2953 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2954 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2955 | R_TRY(allocator_result); | ||
| 2956 | |||
| 2957 | // Update the memory blocks. | ||
| 2958 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 2959 | &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); | ||
| 2960 | |||
| 2961 | R_SUCCEED(); | ||
| 2962 | } | ||
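The device lock/unlock paths drive a per-block share count: ShareToDevice increments it and sets the DeviceShared attribute, and UnshareToDevice decrements it, clearing the attribute only at zero. A minimal sketch of that counting (simplified; the real block also tracks disable-merge state):

    #include <cassert>
    #include <cstdint>

    struct BlockShare {
        uint32_t device_use_count = 0;
        bool device_shared = false;

        void ShareToDevice() {
            ++device_use_count; // another device mapping references this block
            device_shared = true;
        }

        void UnshareToDevice() {
            assert(device_use_count > 0);
            if (--device_use_count == 0) {
                device_shared = false; // last device mapping went away
            }
        }
    };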
| 2963 | |||
| 2964 | Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, | ||
| 2965 | size_t size) { | ||
| 2966 | R_RETURN(this->LockMemoryAndOpen( | ||
| 2967 | nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2968 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, | ||
| 2969 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 2970 | KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, | ||
| 2971 | KMemoryAttribute::Locked)); | ||
| 2972 | } | ||
| 2973 | |||
| 2974 | Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { | ||
| 2975 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2976 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, | ||
| 2977 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2978 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 2979 | KMemoryAttribute::Locked, nullptr)); | ||
| 2980 | } | ||
| 2981 | |||
| 2982 | Result KPageTable::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 2983 | KMemoryPermission perm) { | ||
| 2984 | R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer, | ||
| 2985 | KMemoryState::FlagCanTransfer, KMemoryPermission::All, | ||
| 2986 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 2987 | KMemoryAttribute::None, perm, KMemoryAttribute::Locked)); | ||
| 2988 | } | ||
| 2989 | |||
| 2990 | Result KPageTable::UnlockForTransferMemory(KProcessAddress address, size_t size, | ||
| 2991 | const KPageGroup& pg) { | ||
| 2992 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer, | ||
| 2993 | KMemoryState::FlagCanTransfer, KMemoryPermission::None, | ||
| 2994 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2995 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 2996 | KMemoryAttribute::Locked, std::addressof(pg))); | ||
| 2997 | } | ||
| 2998 | |||
| 2999 | Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) { | ||
| 3000 | R_RETURN(this->LockMemoryAndOpen( | ||
| 3001 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | ||
| 3002 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 3003 | KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, | ||
| 3004 | KMemoryAttribute::Locked)); | ||
| 3005 | } | ||
| 3006 | |||
| 3007 | Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) { | ||
| 3008 | R_RETURN(this->UnlockMemory( | ||
| 3009 | addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | ||
| 3010 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3011 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); | ||
| 3012 | } | ||
| 3013 | |||
| 3014 | bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const { | ||
| 3015 | auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr)); | ||
| 3016 | for (u64 offset{}; offset < size; offset += PageSize) { | ||
| 3017 | if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) { | ||
| 3018 | return false; | ||
| 3019 | } | ||
| 3020 | start_ptr += PageSize; | ||
| 3021 | } | ||
| 3022 | return true; | ||
| 3023 | } | ||
| 3024 | |||
| 3025 | void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages, | ||
| 3026 | KPageGroup& page_linked_list) { | ||
| 3027 | KProcessAddress addr{start}; | ||
| 3028 | while (addr < start + (num_pages * PageSize)) { | ||
| 3029 | const KPhysicalAddress paddr{GetPhysicalAddr(addr)}; | ||
| 3030 | ASSERT(paddr != 0); | ||
| 3031 | page_linked_list.AddBlock(paddr, 1); | ||
| 3032 | addr += PageSize; | ||
| 3033 | } | ||
| 3034 | } | ||
| 3035 | |||
| 3036 | KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages, | ||
| 3037 | u64 needed_num_pages, size_t align) { | ||
| 3038 | if (m_enable_aslr) { | ||
| 3039 | UNIMPLEMENTED(); | ||
| 3040 | } | ||
| 3041 | return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, | ||
| 3042 | IsKernel() ? 1 : 4); | ||
| 3043 | } | ||
| 3044 | |||
| 3045 | Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group, | ||
| 3046 | OperationType operation) { | ||
| 3047 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3048 | |||
| 3049 | ASSERT(Common::IsAligned(GetInteger(addr), PageSize)); | ||
| 3050 | ASSERT(num_pages > 0); | ||
| 3051 | ASSERT(num_pages == page_group.GetNumPages()); | ||
| 3052 | |||
| 3053 | switch (operation) { | ||
| 3054 | case OperationType::MapGroup: | ||
| 3055 | case OperationType::MapFirstGroup: { | ||
| 3056 | // We want to maintain a new reference to every page in the group. | ||
| 3057 | KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); | ||
| 3058 | |||
| 3059 | for (const auto& node : page_group) { | ||
| 3060 | const size_t size{node.GetNumPages() * PageSize}; | ||
| 3061 | |||
| 3062 | // Map the pages. | ||
| 3063 | m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); | ||
| 3064 | |||
| 3065 | addr += size; | ||
| 3066 | } | ||
| 3067 | |||
| 3068 | // We succeeded! We want to persist the reference to the pages. | ||
| 3069 | spg.CancelClose(); | ||
| 3070 | |||
| 3071 | break; | ||
| 3072 | } | ||
| 3073 | default: | ||
| 3074 | ASSERT(false); | ||
| 3075 | break; | ||
| 3076 | } | ||
| 3077 | |||
| 3078 | R_SUCCEED(); | ||
| 3079 | } | ||
| 3080 | |||
| 3081 | Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, | ||
| 3082 | OperationType operation, KPhysicalAddress map_addr) { | ||
| 3083 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3084 | |||
| 3085 | ASSERT(num_pages > 0); | ||
| 3086 | ASSERT(Common::IsAligned(GetInteger(addr), PageSize)); | ||
| 3087 | ASSERT(ContainsPages(addr, num_pages)); | ||
| 3088 | |||
| 3089 | switch (operation) { | ||
| 3090 | case OperationType::Unmap: { | ||
| 3091 | // Ensure that any pages we track close on exit. | ||
| 3092 | KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()}; | ||
| 3093 | SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); | ||
| 3094 | |||
| 3095 | this->AddRegionToPages(addr, num_pages, pages_to_close); | ||
| 3096 | m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); | ||
| 3097 | break; | ||
| 3098 | } | ||
| 3099 | case OperationType::Map: { | ||
| 3100 | ASSERT(map_addr); | ||
| 3101 | ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize)); | ||
| 3102 | m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | ||
| 3103 | |||
| 3104 | // Open references to pages, if we should. | ||
| 3105 | if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) { | ||
| 3106 | m_kernel.MemoryManager().Open(map_addr, num_pages); | ||
| 3107 | } | ||
| 3108 | break; | ||
| 3109 | } | ||
| 3110 | case OperationType::Separate: { | ||
| 3111 | // HACK: Unimplemented. | ||
| 3112 | break; | ||
| 3113 | } | ||
| 3114 | case OperationType::ChangePermissions: | ||
| 3115 | case OperationType::ChangePermissionsAndRefresh: | ||
| 3116 | case OperationType::ChangePermissionsAndRefreshAndFlush: | ||
| 3117 | break; | ||
| 3118 | default: | ||
| 3119 | ASSERT(false); | ||
| 3120 | break; | ||
| 3121 | } | ||
| 3122 | R_SUCCEED(); | ||
| 3123 | } | ||
| 3124 | |||
| 3125 | void KPageTable::FinalizeUpdate(PageLinkedList* page_list) { | ||
| 3126 | while (page_list->Peek()) { | ||
| 3127 | [[maybe_unused]] auto page = page_list->Pop(); | ||
| 3128 | |||
| 3129 | // TODO(bunnei): Free pages once they are allocated in guest memory | ||
| 3130 | // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); | ||
| 3131 | // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); | ||
| 3132 | // this->GetPageTableManager().Free(page); | ||
| 3133 | } | ||
| 3134 | } | ||
| 3135 | |||
| 3136 | KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const { | ||
| 3137 | switch (state) { | ||
| 3138 | case Svc::MemoryState::Free: | ||
| 3139 | case Svc::MemoryState::Kernel: | ||
| 3140 | return m_address_space_start; | ||
| 3141 | case Svc::MemoryState::Normal: | ||
| 3142 | return m_heap_region_start; | ||
| 3143 | case Svc::MemoryState::Ipc: | ||
| 3144 | case Svc::MemoryState::NonSecureIpc: | ||
| 3145 | case Svc::MemoryState::NonDeviceIpc: | ||
| 3146 | return m_alias_region_start; | ||
| 3147 | case Svc::MemoryState::Stack: | ||
| 3148 | return m_stack_region_start; | ||
| 3149 | case Svc::MemoryState::Static: | ||
| 3150 | case Svc::MemoryState::ThreadLocal: | ||
| 3151 | return m_kernel_map_region_start; | ||
| 3152 | case Svc::MemoryState::Io: | ||
| 3153 | case Svc::MemoryState::Shared: | ||
| 3154 | case Svc::MemoryState::AliasCode: | ||
| 3155 | case Svc::MemoryState::AliasCodeData: | ||
| 3156 | case Svc::MemoryState::Transfered: | ||
| 3157 | case Svc::MemoryState::SharedTransfered: | ||
| 3158 | case Svc::MemoryState::SharedCode: | ||
| 3159 | case Svc::MemoryState::GeneratedCode: | ||
| 3160 | case Svc::MemoryState::CodeOut: | ||
| 3161 | case Svc::MemoryState::Coverage: | ||
| 3162 | case Svc::MemoryState::Insecure: | ||
| 3163 | return m_alias_code_region_start; | ||
| 3164 | case Svc::MemoryState::Code: | ||
| 3165 | case Svc::MemoryState::CodeData: | ||
| 3166 | return m_code_region_start; | ||
| 3167 | default: | ||
| 3168 | UNREACHABLE(); | ||
| 3169 | } | ||
| 3170 | } | ||
| 3171 | |||
| 3172 | size_t KPageTable::GetRegionSize(Svc::MemoryState state) const { | ||
| 3173 | switch (state) { | ||
| 3174 | case Svc::MemoryState::Free: | ||
| 3175 | case Svc::MemoryState::Kernel: | ||
| 3176 | return m_address_space_end - m_address_space_start; | ||
| 3177 | case Svc::MemoryState::Normal: | ||
| 3178 | return m_heap_region_end - m_heap_region_start; | ||
| 3179 | case Svc::MemoryState::Ipc: | ||
| 3180 | case Svc::MemoryState::NonSecureIpc: | ||
| 3181 | case Svc::MemoryState::NonDeviceIpc: | ||
| 3182 | return m_alias_region_end - m_alias_region_start; | ||
| 3183 | case Svc::MemoryState::Stack: | ||
| 3184 | return m_stack_region_end - m_stack_region_start; | ||
| 3185 | case Svc::MemoryState::Static: | ||
| 3186 | case Svc::MemoryState::ThreadLocal: | ||
| 3187 | return m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 3188 | case Svc::MemoryState::Io: | ||
| 3189 | case Svc::MemoryState::Shared: | ||
| 3190 | case Svc::MemoryState::AliasCode: | ||
| 3191 | case Svc::MemoryState::AliasCodeData: | ||
| 3192 | case Svc::MemoryState::Transfered: | ||
| 3193 | case Svc::MemoryState::SharedTransfered: | ||
| 3194 | case Svc::MemoryState::SharedCode: | ||
| 3195 | case Svc::MemoryState::GeneratedCode: | ||
| 3196 | case Svc::MemoryState::CodeOut: | ||
| 3197 | case Svc::MemoryState::Coverage: | ||
| 3198 | case Svc::MemoryState::Insecure: | ||
| 3199 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 3200 | case Svc::MemoryState::Code: | ||
| 3201 | case Svc::MemoryState::CodeData: | ||
| 3202 | return m_code_region_end - m_code_region_start; | ||
| 3203 | default: | ||
| 3204 | UNREACHABLE(); | ||
| 3205 | } | ||
| 3206 | } | ||
| 3207 | |||
| 3208 | bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const { | ||
| 3209 | const KProcessAddress end = addr + size; | ||
| 3210 | const KProcessAddress last = end - 1; | ||
| 3211 | |||
| 3212 | const KProcessAddress region_start = this->GetRegionAddress(state); | ||
| 3213 | const size_t region_size = this->GetRegionSize(state); | ||
| 3214 | |||
| 3215 | const bool is_in_region = | ||
| 3216 | region_start <= addr && addr < end && last <= region_start + region_size - 1; | ||
| 3217 | const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || | ||
| 3218 | m_heap_region_start == m_heap_region_end); | ||
| 3219 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || | ||
| 3220 | m_alias_region_start == m_alias_region_end); | ||
| 3221 | switch (state) { | ||
| 3222 | case Svc::MemoryState::Free: | ||
| 3223 | case Svc::MemoryState::Kernel: | ||
| 3224 | return is_in_region; | ||
| 3225 | case Svc::MemoryState::Io: | ||
| 3226 | case Svc::MemoryState::Static: | ||
| 3227 | case Svc::MemoryState::Code: | ||
| 3228 | case Svc::MemoryState::CodeData: | ||
| 3229 | case Svc::MemoryState::Shared: | ||
| 3230 | case Svc::MemoryState::AliasCode: | ||
| 3231 | case Svc::MemoryState::AliasCodeData: | ||
| 3232 | case Svc::MemoryState::Stack: | ||
| 3233 | case Svc::MemoryState::ThreadLocal: | ||
| 3234 | case Svc::MemoryState::Transfered: | ||
| 3235 | case Svc::MemoryState::SharedTransfered: | ||
| 3236 | case Svc::MemoryState::SharedCode: | ||
| 3237 | case Svc::MemoryState::GeneratedCode: | ||
| 3238 | case Svc::MemoryState::CodeOut: | ||
| 3239 | case Svc::MemoryState::Coverage: | ||
| 3240 | case Svc::MemoryState::Insecure: | ||
| 3241 | return is_in_region && !is_in_heap && !is_in_alias; | ||
| 3242 | case Svc::MemoryState::Normal: | ||
| 3243 | ASSERT(is_in_heap); | ||
| 3244 | return is_in_region && !is_in_alias; | ||
| 3245 | case Svc::MemoryState::Ipc: | ||
| 3246 | case Svc::MemoryState::NonSecureIpc: | ||
| 3247 | case Svc::MemoryState::NonDeviceIpc: | ||
| 3248 | ASSERT(is_in_alias); | ||
| 3249 | return is_in_region && !is_in_heap; | ||
| 3250 | default: | ||
| 3251 | return false; | ||
| 3252 | } | ||
| 3253 | } | ||
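
The is_in_heap and is_in_alias expressions are the standard half-open interval overlap test in negated form: [addr, end) intersects [r_start, r_end) iff neither range ends at or before the other begins, with an extra clause so an empty region (start == end) never counts as overlapping. A minimal self-checking sketch:

```cpp
#include <cassert>
#include <cstdint>

// True when [begin, end) intersects [region_begin, region_end).
// An empty region (region_begin == region_end) never overlaps anything.
bool OverlapsRegion(std::uint64_t begin, std::uint64_t end,
                    std::uint64_t region_begin, std::uint64_t region_end) {
    return !(end <= region_begin || region_end <= begin ||
             region_begin == region_end);
}

int main() {
    assert(OverlapsRegion(0x1000, 0x3000, 0x2000, 0x4000));  // partial overlap
    assert(!OverlapsRegion(0x1000, 0x2000, 0x2000, 0x4000)); // touching, no overlap
    assert(!OverlapsRegion(0x1000, 0x3000, 0x2000, 0x2000)); // empty region
    return 0;
}
```
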
| 3254 | |||
| 3255 | Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, | ||
| 3256 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3257 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3258 | KMemoryAttribute attr) const { | ||
| 3259 | // Validate the states match expectation. | ||
| 3260 | R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); | ||
| 3261 | R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); | ||
| 3262 | R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); | ||
| 3263 | |||
| 3264 | R_SUCCEED(); | ||
| 3265 | } | ||
| 3266 | |||
| 3267 | Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, | ||
| 3268 | size_t size, KMemoryState state_mask, | ||
| 3269 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3270 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3271 | KMemoryAttribute attr) const { | ||
| 3272 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3273 | |||
| 3274 | // Get information about the first block. | ||
| 3275 | const KProcessAddress last_addr = addr + size - 1; | ||
| 3276 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 3277 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 3278 | |||
| 3279 | // If the start address isn't aligned, we need a block. | ||
| 3280 | const size_t blocks_for_start_align = | ||
| 3281 | (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; | ||
| 3282 | |||
| 3283 | while (true) { | ||
| 3284 | // Validate against the provided masks. | ||
| 3285 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 3286 | |||
| 3287 | // Break once we're done. | ||
| 3288 | if (last_addr <= info.GetLastAddress()) { | ||
| 3289 | break; | ||
| 3290 | } | ||
| 3291 | |||
| 3292 | // Advance our iterator. | ||
| 3293 | it++; | ||
| 3294 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 3295 | info = it->GetMemoryInfo(); | ||
| 3296 | } | ||
| 3297 | |||
| 3298 | // If the end address isn't aligned, we need a block. | ||
| 3299 | const size_t blocks_for_end_align = | ||
| 3300 | (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0; | ||
| 3301 | |||
| 3302 | if (out_blocks_needed != nullptr) { | ||
| 3303 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; | ||
| 3304 | } | ||
| 3305 | |||
| 3306 | R_SUCCEED(); | ||
| 3307 | } | ||
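
The two alignment checks bound the allocator cost: an update over [addr, addr + size) can split at most two existing memory blocks, one at each end, and only when the page-aligned end of the range falls strictly inside a block rather than on its boundary. A hedged sketch of the counting, with the first and last block extents passed in as hypothetical inputs:

```cpp
#include <cstddef>
#include <cstdint>

constexpr std::uint64_t kPageSize = 0x1000;

constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
    return v & ~(a - 1);
}
constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
    return AlignDown(v + a - 1, a);
}

// One extra block is needed at each end of [addr, addr + size) that does not
// coincide with the corresponding existing block boundary.
constexpr std::size_t BlocksNeeded(std::uint64_t addr, std::uint64_t size,
                                   std::uint64_t first_block_start,
                                   std::uint64_t last_block_end) {
    const std::size_t for_start = (AlignDown(addr, kPageSize) != first_block_start) ? 1 : 0;
    const std::size_t for_end = (AlignUp(addr + size, kPageSize) != last_block_end) ? 1 : 0;
    return for_start + for_end;
}

// A range splitting a single [0x0, 0x4000) block at both ends needs two extra
// blocks; a range exactly covering it needs none.
static_assert(BlocksNeeded(0x1000, 0x2000, 0x0, 0x4000) == 2);
static_assert(BlocksNeeded(0x0, 0x4000, 0x0, 0x4000) == 0);
```
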
| 3308 | |||
| 3309 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 3310 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 3311 | KMemoryBlockManager::const_iterator it, | ||
| 3312 | KProcessAddress last_addr, KMemoryState state_mask, | ||
| 3313 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3314 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3315 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 3316 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3317 | |||
| 3318 | // Get information about the first block. | ||
| 3319 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 3320 | |||
| 3321 | // Validate all blocks in the range have correct state. | ||
| 3322 | const KMemoryState first_state = info.m_state; | ||
| 3323 | const KMemoryPermission first_perm = info.m_permission; | ||
| 3324 | const KMemoryAttribute first_attr = info.m_attribute; | ||
| 3325 | while (true) { | ||
| 3326 | // Validate the current block. | ||
| 3327 | R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); | ||
| 3328 | R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); | ||
| 3329 | R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), | ||
| 3330 | ResultInvalidCurrentMemory); | ||
| 3331 | |||
| 3332 | // Validate against the provided masks. | ||
| 3333 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 3334 | |||
| 3335 | // Break once we're done. | ||
| 3336 | if (last_addr <= info.GetLastAddress()) { | ||
| 3337 | break; | ||
| 3338 | } | ||
| 3339 | |||
| 3340 | // Advance our iterator. | ||
| 3341 | it++; | ||
| 3342 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 3343 | info = it->GetMemoryInfo(); | ||
| 3344 | } | ||
| 3345 | |||
| 3346 | // Write output state. | ||
| 3347 | if (out_state != nullptr) { | ||
| 3348 | *out_state = first_state; | ||
| 3349 | } | ||
| 3350 | if (out_perm != nullptr) { | ||
| 3351 | *out_perm = first_perm; | ||
| 3352 | } | ||
| 3353 | if (out_attr != nullptr) { | ||
| 3354 | *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr); | ||
| 3355 | } | ||
| 3356 | |||
| 3357 | // If the end address isn't aligned, we need a block. | ||
| 3358 | if (out_blocks_needed != nullptr) { | ||
| 3359 | const size_t blocks_for_end_align = | ||
| 3360 | (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) | ||
| 3361 | ? 1 | ||
| 3362 | : 0; | ||
| 3363 | *out_blocks_needed = blocks_for_end_align; | ||
| 3364 | } | ||
| 3365 | |||
| 3366 | R_SUCCEED(); | ||
| 3367 | } | ||
| 3368 | |||
| 3369 | Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 3370 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 3371 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 3372 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3373 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3374 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 3375 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3376 | |||
| 3377 | // Check memory state. | ||
| 3378 | const KProcessAddress last_addr = addr + size - 1; | ||
| 3379 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 3380 | R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, | ||
| 3381 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr)); | ||
| 3382 | |||
| 3383 | // If the start address isn't aligned, we need a block. | ||
| 3384 | if (out_blocks_needed != nullptr && | ||
| 3385 | Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) { | ||
| 3386 | ++(*out_blocks_needed); | ||
| 3387 | } | ||
| 3388 | |||
| 3389 | R_SUCCEED(); | ||
| 3390 | } | ||
| 3391 | |||
| 3392 | Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress, | ||
| 3393 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 3394 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3395 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3396 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 3397 | KMemoryAttribute lock_attr) { | ||
| 3398 | // Validate basic preconditions. | ||
| 3399 | ASSERT((lock_attr & attr) == KMemoryAttribute::None); | ||
| 3400 | ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == | ||
| 3401 | KMemoryAttribute::None); | ||
| 3402 | |||
| 3403 | // Validate the lock request. | ||
| 3404 | const size_t num_pages = size / PageSize; | ||
| 3405 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 3406 | |||
| 3407 | // Lock the table. | ||
| 3408 | KScopedLightLock lk(m_general_lock); | ||
| 3409 | |||
| 3410 | // Check that the output page group is empty, if it exists. | ||
| 3411 | if (out_pg) { | ||
| 3412 | ASSERT(out_pg->GetNumPages() == 0); | ||
| 3413 | } | ||
| 3414 | |||
| 3415 | // Check the state. | ||
| 3416 | KMemoryState old_state{}; | ||
| 3417 | KMemoryPermission old_perm{}; | ||
| 3418 | KMemoryAttribute old_attr{}; | ||
| 3419 | size_t num_allocator_blocks{}; | ||
| 3420 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 3421 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 3422 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 3423 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 3424 | attr_mask, attr)); | ||
| 3425 | |||
| 3426 | // Get the physical address, if we're supposed to. | ||
| 3427 | if (out_KPhysicalAddress != nullptr) { | ||
| 3428 | ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr)); | ||
| 3429 | } | ||
| 3430 | |||
| 3431 | // Make the page group, if we're supposed to. | ||
| 3432 | if (out_pg != nullptr) { | ||
| 3433 | R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); | ||
| 3434 | } | ||
| 3435 | |||
| 3436 | // Create an update allocator. | ||
| 3437 | Result allocator_result{ResultSuccess}; | ||
| 3438 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3439 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3440 | R_TRY(allocator_result); | ||
| 3441 | |||
| 3442 | // Decide on new perm and attr. | ||
| 3443 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 3444 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); | ||
| 3445 | |||
| 3446 | // Update permission, if we need to. | ||
| 3447 | if (new_perm != old_perm) { | ||
| 3448 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 3449 | } | ||
| 3450 | |||
| 3451 | // Apply the memory block updates. | ||
| 3452 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 3453 | new_attr, KMemoryBlockDisableMergeAttribute::Locked, | ||
| 3454 | KMemoryBlockDisableMergeAttribute::None); | ||
| 3455 | |||
| 3456 | // If we have an output page group, open it. | ||
| 3457 | if (out_pg) { | ||
| 3458 | out_pg->Open(); | ||
| 3459 | } | ||
| 3460 | |||
| 3461 | R_SUCCEED(); | ||
| 3462 | } | ||
| 3463 | |||
| 3464 | Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 3465 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 3466 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 3467 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 3468 | KMemoryAttribute lock_attr, const KPageGroup* pg) { | ||
| 3469 | // Validate basic preconditions. | ||
| 3470 | ASSERT((attr_mask & lock_attr) == lock_attr); | ||
| 3471 | ASSERT((attr & lock_attr) == lock_attr); | ||
| 3472 | |||
| 3473 | // Validate the unlock request. | ||
| 3474 | const size_t num_pages = size / PageSize; | ||
| 3475 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 3476 | |||
| 3477 | // Lock the table. | ||
| 3478 | KScopedLightLock lk(m_general_lock); | ||
| 3479 | |||
| 3480 | // Check the state. | ||
| 3481 | KMemoryState old_state{}; | ||
| 3482 | KMemoryPermission old_perm{}; | ||
| 3483 | KMemoryAttribute old_attr{}; | ||
| 3484 | size_t num_allocator_blocks{}; | ||
| 3485 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 3486 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 3487 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 3488 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 3489 | attr_mask, attr)); | ||
| 3490 | |||
| 3491 | // Check the page group. | ||
| 3492 | if (pg != nullptr) { | ||
| 3493 | R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion); | ||
| 3494 | } | ||
| 3495 | |||
| 3496 | // Decide on new perm and attr. | ||
| 3497 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 3498 | KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); | ||
| 3499 | |||
| 3500 | // Create an update allocator. | ||
| 3501 | Result allocator_result{ResultSuccess}; | ||
| 3502 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3503 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3504 | R_TRY(allocator_result); | ||
| 3505 | |||
| 3506 | // Update permission, if we need to. | ||
| 3507 | if (new_perm != old_perm) { | ||
| 3508 | R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); | ||
| 3509 | } | ||
| 3510 | |||
| 3511 | // Apply the memory block updates. | ||
| 3512 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 3513 | new_attr, KMemoryBlockDisableMergeAttribute::None, | ||
| 3514 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 3515 | |||
| 3516 | R_SUCCEED(); | ||
| 3517 | } | ||
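
LockMemoryAndOpen and UnlockMemory are symmetric over the attribute word: locking ORs lock_attr in, unlocking masks it back out, so a lock/unlock pair leaves the other attribute bits untouched. A tiny self-checking sketch (the bit values here are hypothetical, not the kernel's KMemoryAttribute encoding):

```cpp
#include <cassert>
#include <cstdint>

enum Attr : std::uint32_t {
    Attr_Locked = 1u << 0,       // hypothetical bit assignments
    Attr_DeviceShared = 1u << 1,
};

int main() {
    std::uint32_t attr = Attr_DeviceShared;
    attr |= Attr_Locked;                              // lock:   old_attr | lock_attr
    attr &= ~static_cast<std::uint32_t>(Attr_Locked); // unlock: old_attr & ~lock_attr
    assert(attr == Attr_DeviceShared);                // round-trips cleanly
    return 0;
}
```
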
| 3518 | |||
| 3519 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 66f16faaf..5541bc13f 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -3,548 +3,14 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <memory> | 6 | #include "core/hle/kernel/k_page_table_base.h" |
| 7 | |||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "common/page_table.h" | ||
| 10 | #include "core/file_sys/program_metadata.h" | ||
| 11 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 12 | #include "core/hle/kernel/k_light_lock.h" | ||
| 13 | #include "core/hle/kernel/k_memory_block.h" | ||
| 14 | #include "core/hle/kernel/k_memory_block_manager.h" | ||
| 15 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 16 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 17 | #include "core/hle/kernel/k_typed_address.h" | ||
| 18 | #include "core/hle/result.h" | ||
| 19 | #include "core/memory.h" | ||
| 20 | |||
| 21 | namespace Core { | ||
| 22 | class System; | ||
| 23 | } | ||
| 24 | 7 | ||
| 25 | namespace Kernel { | 8 | namespace Kernel { |
| 26 | 9 | ||
| 27 | enum class DisableMergeAttribute : u8 { | 10 | class KPageTable final : public KPageTableBase { |
| 28 | None = (0U << 0), | ||
| 29 | DisableHead = (1U << 0), | ||
| 30 | DisableHeadAndBody = (1U << 1), | ||
| 31 | EnableHeadAndBody = (1U << 2), | ||
| 32 | DisableTail = (1U << 3), | ||
| 33 | EnableTail = (1U << 4), | ||
| 34 | EnableAndMergeHeadBodyTail = (1U << 5), | ||
| 35 | EnableHeadBodyTail = EnableHeadAndBody | EnableTail, | ||
| 36 | DisableHeadBodyTail = DisableHeadAndBody | DisableTail, | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct KPageProperties { | ||
| 40 | KMemoryPermission perm; | ||
| 41 | bool io; | ||
| 42 | bool uncached; | ||
| 43 | DisableMergeAttribute disable_merge_attributes; | ||
| 44 | }; | ||
| 45 | static_assert(std::is_trivial_v<KPageProperties>); | ||
| 46 | static_assert(sizeof(KPageProperties) == sizeof(u32)); | ||
| 47 | |||
| 48 | class KBlockInfoManager; | ||
| 49 | class KMemoryBlockManager; | ||
| 50 | class KResourceLimit; | ||
| 51 | class KSystemResource; | ||
| 52 | |||
| 53 | class KPageTable final { | ||
| 54 | protected: | ||
| 55 | struct PageLinkedList; | ||
| 56 | |||
| 57 | public: | ||
| 58 | enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll }; | ||
| 59 | |||
| 60 | YUZU_NON_COPYABLE(KPageTable); | ||
| 61 | YUZU_NON_MOVEABLE(KPageTable); | ||
| 62 | |||
| 63 | explicit KPageTable(Core::System& system_); | ||
| 64 | ~KPageTable(); | ||
| 65 | |||
| 66 | Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, | ||
| 67 | bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, | ||
| 68 | KProcessAddress code_addr, size_t code_size, | ||
| 69 | KSystemResource* system_resource, KResourceLimit* resource_limit, | ||
| 70 | Core::Memory::Memory& memory); | ||
| 71 | |||
| 72 | void Finalize(); | ||
| 73 | |||
| 74 | Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state, | ||
| 75 | KMemoryPermission perm); | ||
| 76 | Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 77 | Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 78 | ICacheInvalidationStrategy icache_invalidation_strategy); | ||
| 79 | Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table, | ||
| 80 | KProcessAddress src_addr); | ||
| 81 | Result MapPhysicalMemory(KProcessAddress addr, size_t size); | ||
| 82 | Result UnmapPhysicalMemory(KProcessAddress addr, size_t size); | ||
| 83 | Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size); | ||
| 84 | Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size); | ||
| 85 | Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 86 | Svc::MemoryPermission svc_perm); | ||
| 87 | KMemoryInfo QueryInfo(KProcessAddress addr); | ||
| 88 | Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm); | ||
| 89 | Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr); | ||
| 90 | Result SetMaxHeapSize(size_t size); | ||
| 91 | Result SetHeapSize(u64* out, size_t size); | ||
| 92 | Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, | ||
| 93 | KMemoryPermission perm, bool is_aligned, bool check_heap); | ||
| 94 | Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap); | ||
| 95 | |||
| 96 | Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size); | ||
| 97 | |||
| 98 | Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size); | ||
| 99 | Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size); | ||
| 100 | |||
| 101 | Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 102 | KPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 103 | KMemoryState dst_state, bool send); | ||
| 104 | Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 105 | Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 106 | |||
| 107 | Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 108 | KMemoryPermission perm); | ||
| 109 | Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg); | ||
| 110 | Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size); | ||
| 111 | Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg); | ||
| 112 | Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 113 | KMemoryState state_mask, KMemoryState state, | ||
| 114 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 115 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | ||
| 116 | |||
| 117 | Common::PageTable& PageTableImpl() { | ||
| 118 | return *m_page_table_impl; | ||
| 119 | } | ||
| 120 | |||
| 121 | const Common::PageTable& PageTableImpl() const { | ||
| 122 | return *m_page_table_impl; | ||
| 123 | } | ||
| 124 | |||
| 125 | KBlockInfoManager* GetBlockInfoManager() { | ||
| 126 | return m_block_info_manager; | ||
| 127 | } | ||
| 128 | |||
| 129 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 130 | KPhysicalAddress phys_addr, KProcessAddress region_start, | ||
| 131 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { | ||
| 132 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, | ||
| 133 | region_num_pages, state, perm)); | ||
| 134 | } | ||
| 135 | |||
| 136 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 137 | KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { | ||
| 138 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | ||
| 139 | this->GetRegionAddress(state), | ||
| 140 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 141 | } | ||
| 142 | |||
| 143 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, | ||
| 144 | KMemoryPermission perm) { | ||
| 145 | R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false, | ||
| 146 | this->GetRegionAddress(state), | ||
| 147 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 148 | } | ||
| 149 | |||
| 150 | Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 151 | KMemoryPermission perm); | ||
| 152 | Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); | ||
| 153 | |||
| 154 | Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 155 | KProcessAddress region_start, size_t region_num_pages, KMemoryState state, | ||
| 156 | KMemoryPermission perm); | ||
| 157 | Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state, | ||
| 158 | KMemoryPermission perm); | ||
| 159 | Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state); | ||
| 160 | void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 161 | const KPageGroup& pg); | ||
| 162 | |||
| 163 | KProcessAddress GetRegionAddress(Svc::MemoryState state) const; | ||
| 164 | size_t GetRegionSize(Svc::MemoryState state) const; | ||
| 165 | bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const; | ||
| 166 | |||
| 167 | KProcessAddress GetRegionAddress(KMemoryState state) const { | ||
| 168 | return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 169 | } | ||
| 170 | size_t GetRegionSize(KMemoryState state) const { | ||
| 171 | return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 172 | } | ||
| 173 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | ||
| 174 | return this->CanContain(addr, size, | ||
| 175 | static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 176 | } | ||
| 177 | |||
| 178 | protected: | ||
| 179 | struct PageLinkedList { | ||
| 180 | private: | ||
| 181 | struct Node { | ||
| 182 | Node* m_next; | ||
| 183 | std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||
| 184 | }; | ||
| 185 | |||
| 186 | public: | ||
| 187 | constexpr PageLinkedList() = default; | ||
| 188 | |||
| 189 | void Push(Node* n) { | ||
| 190 | ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||
| 191 | n->m_next = m_root; | ||
| 192 | m_root = n; | ||
| 193 | } | ||
| 194 | |||
| 195 | void Push(Core::Memory::Memory& memory, KVirtualAddress addr) { | ||
| 196 | this->Push(memory.GetPointer<Node>(GetInteger(addr))); | ||
| 197 | } | ||
| 198 | |||
| 199 | Node* Peek() const { | ||
| 200 | return m_root; | ||
| 201 | } | ||
| 202 | |||
| 203 | Node* Pop() { | ||
| 204 | Node* const r = m_root; | ||
| 205 | |||
| 206 | m_root = r->m_next; | ||
| 207 | r->m_next = nullptr; | ||
| 208 | |||
| 209 | return r; | ||
| 210 | } | ||
| 211 | |||
| 212 | private: | ||
| 213 | Node* m_root{}; | ||
| 214 | }; | ||
| 215 | static_assert(std::is_trivially_destructible<PageLinkedList>::value); | ||
| 216 | |||
| 217 | private: | ||
| 218 | enum class OperationType : u32 { | ||
| 219 | Map = 0, | ||
| 220 | MapGroup = 1, | ||
| 221 | MapFirstGroup = 2, | ||
| 222 | Unmap = 3, | ||
| 223 | ChangePermissions = 4, | ||
| 224 | ChangePermissionsAndRefresh = 5, | ||
| 225 | ChangePermissionsAndRefreshAndFlush = 6, | ||
| 226 | Separate = 7, | ||
| 227 | }; | ||
| 228 | |||
| 229 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | ||
| 230 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; | ||
| 231 | |||
| 232 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 233 | KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, | ||
| 234 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm); | ||
| 235 | bool IsRegionContiguous(KProcessAddress addr, u64 size) const; | ||
| 236 | void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list); | ||
| 237 | KMemoryInfo QueryInfoImpl(KProcessAddress addr); | ||
| 238 | KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages, | ||
| 239 | u64 needed_num_pages, size_t align); | ||
| 240 | Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group, | ||
| 241 | OperationType operation); | ||
| 242 | Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm, | ||
| 243 | OperationType operation, KPhysicalAddress map_addr = 0); | ||
| 244 | void FinalizeUpdate(PageLinkedList* page_list); | ||
| 245 | |||
| 246 | KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 247 | size_t num_pages, size_t alignment, size_t offset, | ||
| 248 | size_t guard_pages); | ||
| 249 | |||
| 250 | Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 251 | KMemoryState state_mask, KMemoryState state, | ||
| 252 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 253 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 254 | Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 255 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 256 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 257 | KMemoryAttribute attr) const { | ||
| 258 | R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, | ||
| 259 | perm, attr_mask, attr)); | ||
| 260 | } | ||
| 261 | |||
| 262 | Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, | ||
| 263 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 264 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 265 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 266 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 267 | KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, | ||
| 268 | KMemoryState state_mask, KMemoryState state, | ||
| 269 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 270 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 271 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 272 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 273 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 274 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 275 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 276 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 277 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 278 | Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 279 | KMemoryState state_mask, KMemoryState state, | ||
| 280 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 281 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 282 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 283 | R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, | ||
| 284 | state_mask, state, perm_mask, perm, attr_mask, attr, | ||
| 285 | ignore_attr)); | ||
| 286 | } | ||
| 287 | Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 288 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 289 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 290 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 291 | R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, | ||
| 292 | attr_mask, attr, ignore_attr)); | ||
| 293 | } | ||
| 294 | |||
| 295 | Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress, | ||
| 296 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 297 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 298 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 299 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 300 | KMemoryAttribute lock_attr); | ||
| 301 | Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 302 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 303 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 304 | KMemoryPermission new_perm, KMemoryAttribute lock_attr, | ||
| 305 | const KPageGroup* pg); | ||
| 306 | |||
| 307 | Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 308 | bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 309 | |||
| 310 | bool IsLockedByCurrentThread() const { | ||
| 311 | return m_general_lock.IsLockedByCurrentThread(); | ||
| 312 | } | ||
| 313 | |||
| 314 | bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) { | ||
| 315 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 316 | |||
| 317 | return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); | ||
| 318 | } | ||
| 319 | |||
| 320 | bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const { | ||
| 321 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 322 | |||
| 323 | *out = GetPhysicalAddr(virt_addr); | ||
| 324 | |||
| 325 | return *out != 0; | ||
| 326 | } | ||
| 327 | |||
| 328 | Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 329 | KProcessAddress address, size_t size, KMemoryPermission test_perm, | ||
| 330 | KMemoryState dst_state); | ||
| 331 | Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr, | ||
| 332 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 333 | KPageTable& src_page_table, bool send); | ||
| 334 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address, | ||
| 335 | size_t size, KMemoryPermission prot_perm); | ||
| 336 | |||
| 337 | Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 338 | size_t num_pages, KMemoryPermission perm); | ||
| 339 | Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 340 | const KPageGroup& pg, const KPageProperties properties, bool reuse_ll); | ||
| 341 | |||
| 342 | mutable KLightLock m_general_lock; | ||
| 343 | mutable KLightLock m_map_physical_memory_lock; | ||
| 344 | |||
| 345 | public: | ||
| 346 | constexpr KProcessAddress GetAddressSpaceStart() const { | ||
| 347 | return m_address_space_start; | ||
| 348 | } | ||
| 349 | constexpr KProcessAddress GetAddressSpaceEnd() const { | ||
| 350 | return m_address_space_end; | ||
| 351 | } | ||
| 352 | constexpr size_t GetAddressSpaceSize() const { | ||
| 353 | return m_address_space_end - m_address_space_start; | ||
| 354 | } | ||
| 355 | constexpr KProcessAddress GetHeapRegionStart() const { | ||
| 356 | return m_heap_region_start; | ||
| 357 | } | ||
| 358 | constexpr KProcessAddress GetHeapRegionEnd() const { | ||
| 359 | return m_heap_region_end; | ||
| 360 | } | ||
| 361 | constexpr size_t GetHeapRegionSize() const { | ||
| 362 | return m_heap_region_end - m_heap_region_start; | ||
| 363 | } | ||
| 364 | constexpr KProcessAddress GetAliasRegionStart() const { | ||
| 365 | return m_alias_region_start; | ||
| 366 | } | ||
| 367 | constexpr KProcessAddress GetAliasRegionEnd() const { | ||
| 368 | return m_alias_region_end; | ||
| 369 | } | ||
| 370 | constexpr size_t GetAliasRegionSize() const { | ||
| 371 | return m_alias_region_end - m_alias_region_start; | ||
| 372 | } | ||
| 373 | constexpr KProcessAddress GetStackRegionStart() const { | ||
| 374 | return m_stack_region_start; | ||
| 375 | } | ||
| 376 | constexpr KProcessAddress GetStackRegionEnd() const { | ||
| 377 | return m_stack_region_end; | ||
| 378 | } | ||
| 379 | constexpr size_t GetStackRegionSize() const { | ||
| 380 | return m_stack_region_end - m_stack_region_start; | ||
| 381 | } | ||
| 382 | constexpr KProcessAddress GetKernelMapRegionStart() const { | ||
| 383 | return m_kernel_map_region_start; | ||
| 384 | } | ||
| 385 | constexpr KProcessAddress GetKernelMapRegionEnd() const { | ||
| 386 | return m_kernel_map_region_end; | ||
| 387 | } | ||
| 388 | constexpr KProcessAddress GetCodeRegionStart() const { | ||
| 389 | return m_code_region_start; | ||
| 390 | } | ||
| 391 | constexpr KProcessAddress GetCodeRegionEnd() const { | ||
| 392 | return m_code_region_end; | ||
| 393 | } | ||
| 394 | constexpr KProcessAddress GetAliasCodeRegionStart() const { | ||
| 395 | return m_alias_code_region_start; | ||
| 396 | } | ||
| 397 | constexpr KProcessAddress GetAliasCodeRegionEnd() const { | ||
| 398 | return m_alias_code_region_end; | ||
| 399 | } | ||
| 400 | constexpr size_t GetAliasCodeRegionSize() const { | ||
| 401 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 402 | } | ||
| 403 | size_t GetNormalMemorySize() const { | ||
| 404 | KScopedLightLock lk(m_general_lock); | ||
| 405 | return GetHeapSize() + m_mapped_physical_memory_size; | ||
| 406 | } | ||
| 407 | constexpr size_t GetAddressSpaceWidth() const { | ||
| 408 | return m_address_space_width; | ||
| 409 | } | ||
| 410 | constexpr size_t GetHeapSize() const { | ||
| 411 | return m_current_heap_end - m_heap_region_start; | ||
| 412 | } | ||
| 413 | constexpr size_t GetNumGuardPages() const { | ||
| 414 | return IsKernel() ? 1 : 4; | ||
| 415 | } | ||
| 416 | KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const { | ||
| 417 | const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits]; | ||
| 418 | ASSERT(backing_addr); | ||
| 419 | return backing_addr + GetInteger(addr); | ||
| 420 | } | ||
| 421 | constexpr bool Contains(KProcessAddress addr) const { | ||
| 422 | return m_address_space_start <= addr && addr <= m_address_space_end - 1; | ||
| 423 | } | ||
| 424 | constexpr bool Contains(KProcessAddress addr, size_t size) const { | ||
| 425 | return m_address_space_start <= addr && addr < addr + size && | ||
| 426 | addr + size - 1 <= m_address_space_end - 1; | ||
| 427 | } | ||
| 428 | constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 429 | return this->Contains(addr, size) && m_alias_region_start <= addr && | ||
| 430 | addr + size - 1 <= m_alias_region_end - 1; | ||
| 431 | } | ||
| 432 | constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const { | ||
| 433 | return this->Contains(addr, size) && m_heap_region_start <= addr && | ||
| 434 | addr + size - 1 <= m_heap_region_end - 1; | ||
| 435 | } | ||
| 436 | |||
| 437 | public: | 11 | public: |
| 438 | static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout, | 12 | explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {} |
| 439 | KPhysicalAddress addr) { | 13 | ~KPageTable() = default; |
| 440 | return layout.GetLinearVirtualAddress(addr); | ||
| 441 | } | ||
| 442 | |||
| 443 | static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, | ||
| 444 | KVirtualAddress addr) { | ||
| 445 | return layout.GetLinearPhysicalAddress(addr); | ||
| 446 | } | ||
| 447 | |||
| 448 | static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout, | ||
| 449 | KPhysicalAddress addr) { | ||
| 450 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 451 | } | ||
| 452 | |||
| 453 | static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout, | ||
| 454 | KVirtualAddress addr) { | ||
| 455 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 456 | } | ||
| 457 | |||
| 458 | static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout, | ||
| 459 | KPhysicalAddress addr) { | ||
| 460 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 461 | } | ||
| 462 | |||
| 463 | static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout, | ||
| 464 | KVirtualAddress addr) { | ||
| 465 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 466 | } | ||
| 467 | |||
| 468 | private: | ||
| 469 | constexpr bool IsKernel() const { | ||
| 470 | return m_is_kernel; | ||
| 471 | } | ||
| 472 | constexpr bool IsAslrEnabled() const { | ||
| 473 | return m_enable_aslr; | ||
| 474 | } | ||
| 475 | |||
| 476 | constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const { | ||
| 477 | return (m_address_space_start <= addr) && | ||
| 478 | (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && | ||
| 479 | (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); | ||
| 480 | } | ||
| 481 | |||
| 482 | private: | ||
| 483 | class KScopedPageTableUpdater { | ||
| 484 | private: | ||
| 485 | KPageTable* m_pt{}; | ||
| 486 | PageLinkedList m_ll; | ||
| 487 | |||
| 488 | public: | ||
| 489 | explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} | ||
| 490 | explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} | ||
| 491 | ~KScopedPageTableUpdater() { | ||
| 492 | m_pt->FinalizeUpdate(this->GetPageList()); | ||
| 493 | } | ||
| 494 | |||
| 495 | PageLinkedList* GetPageList() { | ||
| 496 | return std::addressof(m_ll); | ||
| 497 | } | ||
| 498 | }; | ||
| 499 | |||
| 500 | private: | ||
| 501 | KProcessAddress m_address_space_start{}; | ||
| 502 | KProcessAddress m_address_space_end{}; | ||
| 503 | KProcessAddress m_heap_region_start{}; | ||
| 504 | KProcessAddress m_heap_region_end{}; | ||
| 505 | KProcessAddress m_current_heap_end{}; | ||
| 506 | KProcessAddress m_alias_region_start{}; | ||
| 507 | KProcessAddress m_alias_region_end{}; | ||
| 508 | KProcessAddress m_stack_region_start{}; | ||
| 509 | KProcessAddress m_stack_region_end{}; | ||
| 510 | KProcessAddress m_kernel_map_region_start{}; | ||
| 511 | KProcessAddress m_kernel_map_region_end{}; | ||
| 512 | KProcessAddress m_code_region_start{}; | ||
| 513 | KProcessAddress m_code_region_end{}; | ||
| 514 | KProcessAddress m_alias_code_region_start{}; | ||
| 515 | KProcessAddress m_alias_code_region_end{}; | ||
| 516 | |||
| 517 | size_t m_max_heap_size{}; | ||
| 518 | size_t m_mapped_physical_memory_size{}; | ||
| 519 | size_t m_mapped_unsafe_physical_memory{}; | ||
| 520 | size_t m_mapped_insecure_memory{}; | ||
| 521 | size_t m_mapped_ipc_server_memory{}; | ||
| 522 | size_t m_address_space_width{}; | ||
| 523 | |||
| 524 | KMemoryBlockManager m_memory_block_manager; | ||
| 525 | u32 m_allocate_option{}; | ||
| 526 | |||
| 527 | bool m_is_kernel{}; | ||
| 528 | bool m_enable_aslr{}; | ||
| 529 | bool m_enable_device_address_space_merge{}; | ||
| 530 | |||
| 531 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | ||
| 532 | KBlockInfoManager* m_block_info_manager{}; | ||
| 533 | KResourceLimit* m_resource_limit{}; | ||
| 534 | |||
| 535 | u32 m_heap_fill_value{}; | ||
| 536 | u32 m_ipc_fill_value{}; | ||
| 537 | u32 m_stack_fill_value{}; | ||
| 538 | const KMemoryRegion* m_cached_physical_heap_region{}; | ||
| 539 | |||
| 540 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; | ||
| 541 | KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; | ||
| 542 | |||
| 543 | std::unique_ptr<Common::PageTable> m_page_table_impl; | ||
| 544 | |||
| 545 | Core::System& m_system; | ||
| 546 | KernelCore& m_kernel; | ||
| 547 | Core::Memory::Memory* m_memory{}; | ||
| 548 | }; | 14 | }; |
| 549 | 15 | ||
| 550 | } // namespace Kernel | 16 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp new file mode 100644 index 000000000..6a57ad55c --- /dev/null +++ b/src/core/hle/kernel/k_page_table_base.cpp | |||
| @@ -0,0 +1,5716 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "common/scope_exit.h" | ||
| 5 | #include "common/settings.h" | ||
| 6 | #include "core/core.h" | ||
| 7 | #include "core/hle/kernel/k_address_space_info.h" | ||
| 8 | #include "core/hle/kernel/k_page_table_base.h" | ||
| 9 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | ||
| 10 | #include "core/hle/kernel/k_system_resource.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | namespace { | ||
| 15 | |||
| 16 | class KScopedLightLockPair { | ||
| 17 | YUZU_NON_COPYABLE(KScopedLightLockPair); | ||
| 18 | YUZU_NON_MOVEABLE(KScopedLightLockPair); | ||
| 19 | |||
| 20 | private: | ||
| 21 | KLightLock* m_lower; | ||
| 22 | KLightLock* m_upper; | ||
| 23 | |||
| 24 | public: | ||
| 25 | KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { | ||
| 26 | // Ensure our locks are in a consistent order. | ||
| 27 | if (std::addressof(lhs) <= std::addressof(rhs)) { | ||
| 28 | m_lower = std::addressof(lhs); | ||
| 29 | m_upper = std::addressof(rhs); | ||
| 30 | } else { | ||
| 31 | m_lower = std::addressof(rhs); | ||
| 32 | m_upper = std::addressof(lhs); | ||
| 33 | } | ||
| 34 | |||
| 35 | // Acquire both locks. | ||
| 36 | m_lower->Lock(); | ||
| 37 | if (m_lower != m_upper) { | ||
| 38 | m_upper->Lock(); | ||
| 39 | } | ||
| 40 | } | ||
| 41 | |||
| 42 | ~KScopedLightLockPair() { | ||
| 43 | // Unlock the upper lock. | ||
| 44 | if (m_upper != nullptr && m_upper != m_lower) { | ||
| 45 | m_upper->Unlock(); | ||
| 46 | } | ||
| 47 | |||
| 48 | // Unlock the lower lock. | ||
| 49 | if (m_lower != nullptr) { | ||
| 50 | m_lower->Unlock(); | ||
| 51 | } | ||
| 52 | } | ||
| 53 | |||
| 54 | public: | ||
| 55 | // Utility. | ||
| 56 | void TryUnlockHalf(KLightLock& lock) { | ||
| 57 | // Only allow unlocking if the lock is one half of the pair. | ||
| 58 | if (m_lower != m_upper) { | ||
| 59 | // We want to be sure the lock is one we own. | ||
| 60 | if (m_lower == std::addressof(lock)) { | ||
| 61 | lock.Unlock(); | ||
| 62 | m_lower = nullptr; | ||
| 63 | } else if (m_upper == std::addressof(lock)) { | ||
| 64 | lock.Unlock(); | ||
| 65 | m_upper = nullptr; | ||
| 66 | } | ||
| 67 | } | ||
| 68 | } | ||
| 69 | }; | ||
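
KScopedLightLockPair avoids the classic ABBA deadlock by always acquiring the lock at the lower address first, so any two threads locking the same pair agree on the order. A minimal sketch of the same address-ordering idea with std::mutex:

```cpp
#include <memory>
#include <mutex>

// Acquires two mutexes in a globally consistent (address) order, the same
// deadlock-avoidance scheme KScopedLightLockPair uses above; sketch only.
void LockPairOrdered(std::mutex& a, std::mutex& b) {
    std::mutex* lower =
        std::addressof(a) <= std::addressof(b) ? std::addressof(a) : std::addressof(b);
    std::mutex* upper =
        (lower == std::addressof(a)) ? std::addressof(b) : std::addressof(a);
    lower->lock();
    if (lower != upper) { // a and b may alias; lock only once in that case
        upper->lock();
    }
}
```

When the two locks are known to be distinct, `std::scoped_lock lk(a, b);` provides the same deadlock freedom without manual ordering; the manual scheme here additionally tolerates the aliasing case.
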
| 70 | |||
| 71 | template <typename AddressType> | ||
| 72 | void InvalidateInstructionCache(Core::System& system, AddressType addr, u64 size) { | ||
| 73 | system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size); | ||
| 74 | } | ||
| 75 | |||
| 76 | template <typename AddressType> | ||
| 77 | Result InvalidateDataCache(AddressType addr, u64 size) { | ||
| 78 | R_SUCCEED(); | ||
| 79 | } | ||
| 80 | |||
| 81 | template <typename AddressType> | ||
| 82 | Result StoreDataCache(AddressType addr, u64 size) { | ||
| 83 | R_SUCCEED(); | ||
| 84 | } | ||
| 85 | |||
| 86 | template <typename AddressType> | ||
| 87 | Result FlushDataCache(AddressType addr, u64 size) { | ||
| 88 | R_SUCCEED(); | ||
| 89 | } | ||
| 90 | |||
| 91 | } // namespace | ||
| 92 | |||
| 93 | void KPageTableBase::MemoryRange::Open() { | ||
| 94 | // If the range contains heap pages, open them. | ||
| 95 | if (this->IsHeap()) { | ||
| 96 | m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize); | ||
| 97 | } | ||
| 98 | } | ||
| 99 | |||
| 100 | void KPageTableBase::MemoryRange::Close() { | ||
| 101 | // If the range contains heap pages, close them. | ||
| 102 | if (this->IsHeap()) { | ||
| 103 | m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize); | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | KPageTableBase::KPageTableBase(KernelCore& kernel) | ||
| 108 | : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel), | ||
| 109 | m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {} | ||
| 110 | KPageTableBase::~KPageTableBase() = default; | ||
| 111 | |||
| 112 | Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start, | ||
| 113 | KVirtualAddress end, Core::Memory::Memory& memory) { | ||
| 114 | // Initialize our members. | ||
| 115 | m_address_space_width = | ||
| 116 | static_cast<u32>(is_64_bit ? Common::BitSize<u64>() : Common::BitSize<u32>()); | ||
| 117 | m_address_space_start = KProcessAddress(GetInteger(start)); | ||
| 118 | m_address_space_end = KProcessAddress(GetInteger(end)); | ||
| 119 | m_is_kernel = true; | ||
| 120 | m_enable_aslr = true; | ||
| 121 | m_enable_device_address_space_merge = false; | ||
| 122 | |||
| 123 | m_heap_region_start = 0; | ||
| 124 | m_heap_region_end = 0; | ||
| 125 | m_current_heap_end = 0; | ||
| 126 | m_alias_region_start = 0; | ||
| 127 | m_alias_region_end = 0; | ||
| 128 | m_stack_region_start = 0; | ||
| 129 | m_stack_region_end = 0; | ||
| 130 | m_kernel_map_region_start = 0; | ||
| 131 | m_kernel_map_region_end = 0; | ||
| 132 | m_alias_code_region_start = 0; | ||
| 133 | m_alias_code_region_end = 0; | ||
| 134 | m_code_region_start = 0; | ||
| 135 | m_code_region_end = 0; | ||
| 136 | m_max_heap_size = 0; | ||
| 137 | m_mapped_physical_memory_size = 0; | ||
| 138 | m_mapped_unsafe_physical_memory = 0; | ||
| 139 | m_mapped_insecure_memory = 0; | ||
| 140 | m_mapped_ipc_server_memory = 0; | ||
| 141 | |||
| 142 | m_memory_block_slab_manager = | ||
| 143 | m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer(); | ||
| 144 | m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer(); | ||
| 145 | m_resource_limit = m_kernel.GetSystemResourceLimit(); | ||
| 146 | |||
| 147 | m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System, | ||
| 148 | KMemoryManager::Direction::FromFront); | ||
| 149 | m_heap_fill_value = MemoryFillValue_Zero; | ||
| 150 | m_ipc_fill_value = MemoryFillValue_Zero; | ||
| 151 | m_stack_fill_value = MemoryFillValue_Zero; | ||
| 152 | |||
| 153 | m_cached_physical_linear_region = nullptr; | ||
| 154 | m_cached_physical_heap_region = nullptr; | ||
| 155 | |||
| 156 | // Initialize our implementation. | ||
| 157 | m_impl = std::make_unique<Common::PageTable>(); | ||
| 158 | m_impl->Resize(m_address_space_width, PageBits); | ||
| 159 | |||
| 160 | // Set the tracking memory. | ||
| 161 | m_memory = std::addressof(memory); | ||
| 162 | |||
| 163 | // Initialize our memory block manager. | ||
| 164 | R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, | ||
| 165 | m_memory_block_slab_manager)); | ||
| 166 | } | ||
| 167 | |||
| 168 | Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, | ||
| 169 | bool enable_das_merge, bool from_back, | ||
| 170 | KMemoryManager::Pool pool, KProcessAddress code_address, | ||
| 171 | size_t code_size, KSystemResource* system_resource, | ||
| 172 | KResourceLimit* resource_limit, | ||
| 173 | Core::Memory::Memory& memory) { | ||
| 174 | // Calculate region extents. | ||
| 175 | const size_t as_width = GetAddressSpaceWidth(as_type); | ||
| 176 | const KProcessAddress start = 0; | ||
| 177 | const KProcessAddress end = (1ULL << as_width); | ||
| 178 | |||
| 179 | // Validate the region. | ||
| 180 | ASSERT(start <= code_address); | ||
| 181 | ASSERT(code_address < code_address + code_size); | ||
| 182 | ASSERT(code_address + code_size - 1 <= end - 1); | ||
| 183 | |||
| 184 | // Define helpers. | ||
| 185 | auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) { | ||
| 186 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); | ||
| 187 | }; | ||
| 188 | auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) { | ||
| 189 | return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); | ||
| 190 | }; | ||
| 191 | |||
| 192 | // Set our bit width and heap/alias sizes. | ||
| 193 | m_address_space_width = static_cast<u32>(GetAddressSpaceWidth(as_type)); | ||
| 194 | size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); | ||
| 195 | size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); | ||
| 196 | |||
| 197 | // Adjust heap/alias size if we don't have an alias region. | ||
| 198 | if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) == | ||
| 199 | Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) { | ||
| 200 | heap_region_size += alias_region_size; | ||
| 201 | alias_region_size = 0; | ||
| 202 | } | ||
| 203 | |||
| 204 | // Set code regions and determine remaining sizes. | ||
| 205 | KProcessAddress process_code_start; | ||
| 206 | KProcessAddress process_code_end; | ||
| 207 | size_t stack_region_size; | ||
| 208 | size_t kernel_map_region_size; | ||
| 209 | if (m_address_space_width == 39) { | ||
| 210 | alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); | ||
| 211 | heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); | ||
| 212 | stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); | ||
| 213 | kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | ||
| 214 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); | ||
| 215 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); | ||
| 216 | m_alias_code_region_start = m_code_region_start; | ||
| 217 | m_alias_code_region_end = m_code_region_end; | ||
| 218 | process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment); | ||
| 219 | process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment); | ||
| 220 | } else { | ||
| 221 | stack_region_size = 0; | ||
| 222 | kernel_map_region_size = 0; | ||
| 223 | m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); | ||
| 224 | m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); | ||
| 225 | m_stack_region_start = m_code_region_start; | ||
| 226 | m_alias_code_region_start = m_code_region_start; | ||
| 227 | m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + | ||
| 228 | GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); | ||
| 229 | m_stack_region_end = m_code_region_end; | ||
| 230 | m_kernel_map_region_start = m_code_region_start; | ||
| 231 | m_kernel_map_region_end = m_code_region_end; | ||
| 232 | process_code_start = m_code_region_start; | ||
| 233 | process_code_end = m_code_region_end; | ||
| 234 | } | ||
| 235 | |||
| 236 | // Set other basic fields. | ||
| 237 | m_enable_aslr = enable_aslr; | ||
| 238 | m_enable_device_address_space_merge = enable_das_merge; | ||
| 239 | m_address_space_start = start; | ||
| 240 | m_address_space_end = end; | ||
| 241 | m_is_kernel = false; | ||
| 242 | m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); | ||
| 243 | m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); | ||
| 244 | m_resource_limit = resource_limit; | ||
| 245 | |||
| 246 | // Determine the region in which we can place the remaining (not yet determined) regions. | ||
| 247 | KProcessAddress alloc_start; | ||
| 248 | size_t alloc_size; | ||
| 249 | if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >= | ||
| 250 | (GetInteger(end) - GetInteger(process_code_end))) { | ||
| 251 | alloc_start = m_code_region_start; | ||
| 252 | alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start); | ||
| 253 | } else { | ||
| 254 | alloc_start = process_code_end; | ||
| 255 | alloc_size = GetInteger(end) - GetInteger(process_code_end); | ||
| 256 | } | ||
| 257 | const size_t needed_size = | ||
| 258 | (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); | ||
| 259 | R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); | ||
| 260 | |||
| 261 | const size_t remaining_size = alloc_size - needed_size; | ||
| 262 | |||
| 263 | // Determine random placements for each region. | ||
| 264 | size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0; | ||
| 265 | if (enable_aslr) { | ||
| 266 | alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 267 | RegionAlignment; | ||
| 268 | heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 269 | RegionAlignment; | ||
| 270 | stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 271 | RegionAlignment; | ||
| 272 | kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * | ||
| 273 | RegionAlignment; | ||
| 274 | } | ||
| 275 | |||
| 276 | // Set up the heap and alias regions. | ||
| 277 | m_alias_region_start = alloc_start + alias_rnd; | ||
| 278 | m_alias_region_end = m_alias_region_start + alias_region_size; | ||
| 279 | m_heap_region_start = alloc_start + heap_rnd; | ||
| 280 | m_heap_region_end = m_heap_region_start + heap_region_size; | ||
| 281 | |||
| 282 | if (alias_rnd <= heap_rnd) { | ||
| 283 | m_heap_region_start += alias_region_size; | ||
| 284 | m_heap_region_end += alias_region_size; | ||
| 285 | } else { | ||
| 286 | m_alias_region_start += heap_region_size; | ||
| 287 | m_alias_region_end += heap_region_size; | ||
| 288 | } | ||
| 289 | |||
| 290 | // Set up the stack region. | ||
| 291 | if (stack_region_size) { | ||
| 292 | m_stack_region_start = alloc_start + stack_rnd; | ||
| 293 | m_stack_region_end = m_stack_region_start + stack_region_size; | ||
| 294 | |||
| 295 | if (alias_rnd < stack_rnd) { | ||
| 296 | m_stack_region_start += alias_region_size; | ||
| 297 | m_stack_region_end += alias_region_size; | ||
| 298 | } else { | ||
| 299 | m_alias_region_start += stack_region_size; | ||
| 300 | m_alias_region_end += stack_region_size; | ||
| 301 | } | ||
| 302 | |||
| 303 | if (heap_rnd < stack_rnd) { | ||
| 304 | m_stack_region_start += heap_region_size; | ||
| 305 | m_stack_region_end += heap_region_size; | ||
| 306 | } else { | ||
| 307 | m_heap_region_start += stack_region_size; | ||
| 308 | m_heap_region_end += stack_region_size; | ||
| 309 | } | ||
| 310 | } | ||
| 311 | |||
| 312 | // Set up the kernel map region. | ||
| 313 | if (kernel_map_region_size) { | ||
| 314 | m_kernel_map_region_start = alloc_start + kmap_rnd; | ||
| 315 | m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; | ||
| 316 | |||
| 317 | if (alias_rnd < kmap_rnd) { | ||
| 318 | m_kernel_map_region_start += alias_region_size; | ||
| 319 | m_kernel_map_region_end += alias_region_size; | ||
| 320 | } else { | ||
| 321 | m_alias_region_start += kernel_map_region_size; | ||
| 322 | m_alias_region_end += kernel_map_region_size; | ||
| 323 | } | ||
| 324 | |||
| 325 | if (heap_rnd < kmap_rnd) { | ||
| 326 | m_kernel_map_region_start += heap_region_size; | ||
| 327 | m_kernel_map_region_end += heap_region_size; | ||
| 328 | } else { | ||
| 329 | m_heap_region_start += kernel_map_region_size; | ||
| 330 | m_heap_region_end += kernel_map_region_size; | ||
| 331 | } | ||
| 332 | |||
| 333 | if (stack_region_size) { | ||
| 334 | if (stack_rnd < kmap_rnd) { | ||
| 335 | m_kernel_map_region_start += stack_region_size; | ||
| 336 | m_kernel_map_region_end += stack_region_size; | ||
| 337 | } else { | ||
| 338 | m_stack_region_start += kernel_map_region_size; | ||
| 339 | m_stack_region_end += kernel_map_region_size; | ||
| 340 | } | ||
| 341 | } | ||
| 342 | } | ||
| 343 | |||
| 344 | // Set heap and fill members. | ||
| 345 | m_current_heap_end = m_heap_region_start; | ||
| 346 | m_max_heap_size = 0; | ||
| 347 | m_mapped_physical_memory_size = 0; | ||
| 348 | m_mapped_unsafe_physical_memory = 0; | ||
| 349 | m_mapped_insecure_memory = 0; | ||
| 350 | m_mapped_ipc_server_memory = 0; | ||
| 351 | |||
| 352 | // const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled(); | ||
| 353 | const bool fill_memory = false; | ||
| 354 | m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero; | ||
| 355 | m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero; | ||
| 356 | m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero; | ||
| 357 | |||
| 358 | // Set allocation option. | ||
| 359 | m_allocate_option = | ||
| 360 | KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack | ||
| 361 | : KMemoryManager::Direction::FromFront); | ||
| 362 | |||
| 363 | // Ensure that we selected regions inside our address space. | ||
| 364 | auto IsInAddressSpace = [&](KProcessAddress addr) { | ||
| 365 | return m_address_space_start <= addr && addr <= m_address_space_end; | ||
| 366 | }; | ||
| 367 | ASSERT(IsInAddressSpace(m_alias_region_start)); | ||
| 368 | ASSERT(IsInAddressSpace(m_alias_region_end)); | ||
| 369 | ASSERT(IsInAddressSpace(m_heap_region_start)); | ||
| 370 | ASSERT(IsInAddressSpace(m_heap_region_end)); | ||
| 371 | ASSERT(IsInAddressSpace(m_stack_region_start)); | ||
| 372 | ASSERT(IsInAddressSpace(m_stack_region_end)); | ||
| 373 | ASSERT(IsInAddressSpace(m_kernel_map_region_start)); | ||
| 374 | ASSERT(IsInAddressSpace(m_kernel_map_region_end)); | ||
| 375 | |||
| 376 | // Ensure that we selected regions that don't overlap. | ||
| 377 | const KProcessAddress alias_start = m_alias_region_start; | ||
| 378 | const KProcessAddress alias_last = m_alias_region_end - 1; | ||
| 379 | const KProcessAddress heap_start = m_heap_region_start; | ||
| 380 | const KProcessAddress heap_last = m_heap_region_end - 1; | ||
| 381 | const KProcessAddress stack_start = m_stack_region_start; | ||
| 382 | const KProcessAddress stack_last = m_stack_region_end - 1; | ||
| 383 | const KProcessAddress kmap_start = m_kernel_map_region_start; | ||
| 384 | const KProcessAddress kmap_last = m_kernel_map_region_end - 1; | ||
| 385 | ASSERT(alias_last < heap_start || heap_last < alias_start); | ||
| 386 | ASSERT(alias_last < stack_start || stack_last < alias_start); | ||
| 387 | ASSERT(alias_last < kmap_start || kmap_last < alias_start); | ||
| 388 | ASSERT(heap_last < stack_start || stack_last < heap_start); | ||
| 389 | ASSERT(heap_last < kmap_start || kmap_last < heap_start); | ||
| 390 | |||
| 391 | // Initialize our implementation. | ||
| 392 | m_impl = std::make_unique<Common::PageTable>(); | ||
| 393 | m_impl->Resize(m_address_space_width, PageBits); | ||
| 394 | |||
| 395 | // Set the tracking memory. | ||
| 396 | m_memory = std::addressof(memory); | ||
| 397 | |||
| 398 | // Initialize our memory block manager. | ||
| 399 | R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, | ||
| 400 | m_memory_block_slab_manager)); | ||
| 401 | } | ||
| 402 | |||
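The randomized layout above draws an independent slide for each of the alias, heap, stack, and kernel-map regions, then resolves collisions pairwise: whichever region drew the larger slide is pushed up by the size of the other, so the result is a set of disjoint regions in random order. A minimal standalone sketch of that idea for just two regions follows; the alignment, sizes, and names are hypothetical, not yuzu's constants.

    #include <cstdint>
    #include <cstdio>
    #include <random>

    // Place two regions at random, disjoint offsets above a base address,
    // mirroring the "push the later region past the earlier one" trick above.
    int main() {
        constexpr std::uint64_t kAlign = 0x200000;       // stand-in for RegionAlignment
        constexpr std::uint64_t kAliasSize = 0x10000000; // hypothetical region sizes
        constexpr std::uint64_t kHeapSize = 0x08000000;
        constexpr std::uint64_t kBase = 0x10000000;
        constexpr std::uint64_t kBudget = 0x40000000;    // space available for both regions

        std::mt19937_64 rng{std::random_device{}()};
        const std::uint64_t remaining = kBudget - kAliasSize - kHeapSize;
        auto random_slide = [&] { return (rng() % (remaining / kAlign + 1)) * kAlign; };

        const std::uint64_t alias_rnd = random_slide();
        const std::uint64_t heap_rnd = random_slide();

        std::uint64_t alias_start = kBase + alias_rnd;
        std::uint64_t heap_start = kBase + heap_rnd;
        // Whichever region drew the larger slide ends up second; push it past
        // the other region so the two ranges cannot overlap.
        if (alias_rnd <= heap_rnd) {
            heap_start += kAliasSize;
        } else {
            alias_start += kHeapSize;
        }

        std::printf("alias: [%#llx, %#llx)\n", (unsigned long long)alias_start,
                    (unsigned long long)(alias_start + kAliasSize));
        std::printf("heap:  [%#llx, %#llx)\n", (unsigned long long)heap_start,
                    (unsigned long long)(heap_start + kHeapSize));
    }

With four regions the same comparison is applied to every pair (alias/heap, alias/stack, heap/stack, and so on), which is exactly the cascade of if/else adjustments in the function above.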
| 403 | void KPageTableBase::Finalize() { | ||
| 404 | auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { | ||
| 405 | if (Settings::IsFastmemEnabled()) { | ||
| 406 | m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); | ||
| 407 | } | ||
| 408 | }; | ||
| 409 | |||
| 410 | // Finalize memory blocks. | ||
| 411 | m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback)); | ||
| 412 | |||
| 413 | // Free any unsafe mapped memory. | ||
| 414 | if (m_mapped_unsafe_physical_memory) { | ||
| 415 | UNIMPLEMENTED(); | ||
| 416 | } | ||
| 417 | |||
| 418 | // Release any insecure mapped memory. | ||
| 419 | if (m_mapped_insecure_memory) { | ||
| 420 | if (auto* const insecure_resource_limit = | ||
| 421 | KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); | ||
| 422 | insecure_resource_limit != nullptr) { | ||
| 423 | insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 424 | m_mapped_insecure_memory); | ||
| 425 | } | ||
| 426 | } | ||
| 427 | |||
| 428 | // Release any ipc server memory. | ||
| 429 | if (m_mapped_ipc_server_memory) { | ||
| 430 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 431 | m_mapped_ipc_server_memory); | ||
| 432 | } | ||
| 433 | |||
| 434 | // Close the backing page table, as the destructor is not called for guest objects. | ||
| 435 | m_impl.reset(); | ||
| 436 | } | ||
| 437 | |||
| 438 | KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const { | ||
| 439 | switch (state) { | ||
| 440 | case Svc::MemoryState::Free: | ||
| 441 | case Svc::MemoryState::Kernel: | ||
| 442 | return m_address_space_start; | ||
| 443 | case Svc::MemoryState::Normal: | ||
| 444 | return m_heap_region_start; | ||
| 445 | case Svc::MemoryState::Ipc: | ||
| 446 | case Svc::MemoryState::NonSecureIpc: | ||
| 447 | case Svc::MemoryState::NonDeviceIpc: | ||
| 448 | return m_alias_region_start; | ||
| 449 | case Svc::MemoryState::Stack: | ||
| 450 | return m_stack_region_start; | ||
| 451 | case Svc::MemoryState::Static: | ||
| 452 | case Svc::MemoryState::ThreadLocal: | ||
| 453 | return m_kernel_map_region_start; | ||
| 454 | case Svc::MemoryState::Io: | ||
| 455 | case Svc::MemoryState::Shared: | ||
| 456 | case Svc::MemoryState::AliasCode: | ||
| 457 | case Svc::MemoryState::AliasCodeData: | ||
| 458 | case Svc::MemoryState::Transfered: | ||
| 459 | case Svc::MemoryState::SharedTransfered: | ||
| 460 | case Svc::MemoryState::SharedCode: | ||
| 461 | case Svc::MemoryState::GeneratedCode: | ||
| 462 | case Svc::MemoryState::CodeOut: | ||
| 463 | case Svc::MemoryState::Coverage: | ||
| 464 | case Svc::MemoryState::Insecure: | ||
| 465 | return m_alias_code_region_start; | ||
| 466 | case Svc::MemoryState::Code: | ||
| 467 | case Svc::MemoryState::CodeData: | ||
| 468 | return m_code_region_start; | ||
| 469 | default: | ||
| 470 | UNREACHABLE(); | ||
| 471 | } | ||
| 472 | } | ||
| 473 | |||
| 474 | size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const { | ||
| 475 | switch (state) { | ||
| 476 | case Svc::MemoryState::Free: | ||
| 477 | case Svc::MemoryState::Kernel: | ||
| 478 | return m_address_space_end - m_address_space_start; | ||
| 479 | case Svc::MemoryState::Normal: | ||
| 480 | return m_heap_region_end - m_heap_region_start; | ||
| 481 | case Svc::MemoryState::Ipc: | ||
| 482 | case Svc::MemoryState::NonSecureIpc: | ||
| 483 | case Svc::MemoryState::NonDeviceIpc: | ||
| 484 | return m_alias_region_end - m_alias_region_start; | ||
| 485 | case Svc::MemoryState::Stack: | ||
| 486 | return m_stack_region_end - m_stack_region_start; | ||
| 487 | case Svc::MemoryState::Static: | ||
| 488 | case Svc::MemoryState::ThreadLocal: | ||
| 489 | return m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 490 | case Svc::MemoryState::Io: | ||
| 491 | case Svc::MemoryState::Shared: | ||
| 492 | case Svc::MemoryState::AliasCode: | ||
| 493 | case Svc::MemoryState::AliasCodeData: | ||
| 494 | case Svc::MemoryState::Transfered: | ||
| 495 | case Svc::MemoryState::SharedTransfered: | ||
| 496 | case Svc::MemoryState::SharedCode: | ||
| 497 | case Svc::MemoryState::GeneratedCode: | ||
| 498 | case Svc::MemoryState::CodeOut: | ||
| 499 | case Svc::MemoryState::Coverage: | ||
| 500 | case Svc::MemoryState::Insecure: | ||
| 501 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 502 | case Svc::MemoryState::Code: | ||
| 503 | case Svc::MemoryState::CodeData: | ||
| 504 | return m_code_region_end - m_code_region_start; | ||
| 505 | default: | ||
| 506 | UNREACHABLE(); | ||
| 507 | } | ||
| 508 | } | ||
| 509 | |||
| 510 | bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const { | ||
| 511 | const KProcessAddress end = addr + size; | ||
| 512 | const KProcessAddress last = end - 1; | ||
| 513 | |||
| 514 | const KProcessAddress region_start = this->GetRegionAddress(state); | ||
| 515 | const size_t region_size = this->GetRegionSize(state); | ||
| 516 | |||
| 517 | const bool is_in_region = | ||
| 518 | region_start <= addr && addr < end && last <= region_start + region_size - 1; | ||
| 519 | const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || | ||
| 520 | m_heap_region_start == m_heap_region_end); | ||
| 521 | const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || | ||
| 522 | m_alias_region_start == m_alias_region_end); | ||
| 523 | switch (state) { | ||
| 524 | case Svc::MemoryState::Free: | ||
| 525 | case Svc::MemoryState::Kernel: | ||
| 526 | return is_in_region; | ||
| 527 | case Svc::MemoryState::Io: | ||
| 528 | case Svc::MemoryState::Static: | ||
| 529 | case Svc::MemoryState::Code: | ||
| 530 | case Svc::MemoryState::CodeData: | ||
| 531 | case Svc::MemoryState::Shared: | ||
| 532 | case Svc::MemoryState::AliasCode: | ||
| 533 | case Svc::MemoryState::AliasCodeData: | ||
| 534 | case Svc::MemoryState::Stack: | ||
| 535 | case Svc::MemoryState::ThreadLocal: | ||
| 536 | case Svc::MemoryState::Transfered: | ||
| 537 | case Svc::MemoryState::SharedTransfered: | ||
| 538 | case Svc::MemoryState::SharedCode: | ||
| 539 | case Svc::MemoryState::GeneratedCode: | ||
| 540 | case Svc::MemoryState::CodeOut: | ||
| 541 | case Svc::MemoryState::Coverage: | ||
| 542 | case Svc::MemoryState::Insecure: | ||
| 543 | return is_in_region && !is_in_heap && !is_in_alias; | ||
| 544 | case Svc::MemoryState::Normal: | ||
| 545 | ASSERT(is_in_heap); | ||
| 546 | return is_in_region && !is_in_alias; | ||
| 547 | case Svc::MemoryState::Ipc: | ||
| 548 | case Svc::MemoryState::NonSecureIpc: | ||
| 549 | case Svc::MemoryState::NonDeviceIpc: | ||
| 550 | ASSERT(is_in_alias); | ||
| 551 | return is_in_region && !is_in_heap; | ||
| 552 | default: | ||
| 553 | return false; | ||
| 554 | } | ||
| 555 | } | ||
| 556 | |||
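CanContain combines two range tests, both written with inclusive last addresses so that a region ending at the very top of the address space cannot overflow: containment inside the state's region, and non-overlap with the heap and alias regions (which must also be non-empty to overlap at all). A small self-contained illustration of that arithmetic, with made-up bounds:

    #include <cstdint>
    #include <cstdio>

    struct Range {
        std::uint64_t start;
        std::uint64_t last; // inclusive: start + size - 1
    };

    // True when [addr, addr + size) lies entirely inside r.
    bool Contains(const Range& r, std::uint64_t addr, std::uint64_t size) {
        return r.start <= addr && addr + size - 1 <= r.last;
    }

    // True when [addr, addr + size) overlaps r; an empty region never overlaps.
    bool Overlaps(const Range& r, std::uint64_t addr, std::uint64_t size) {
        if (r.last < r.start) {
            return false; // empty region
        }
        return addr <= r.last && r.start <= addr + size - 1;
    }

    int main() {
        const Range alias{0x20000000, 0x2fffffff}; // hypothetical alias region
        std::printf("%d\n", Contains(alias, 0x28000000, 0x1000)); // 1
        std::printf("%d\n", Overlaps(alias, 0x2ffff000, 0x2000)); // 1: crosses the end
    }

The heap and IPC states then invert the expectation: memory in state Normal must overlap the heap region, and the IPC states must overlap the alias region, as the two ASSERTs encode.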
| 557 | Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, | ||
| 558 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 559 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 560 | KMemoryAttribute attr) const { | ||
| 561 | // Validate the states match expectation. | ||
| 562 | R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); | ||
| 563 | R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); | ||
| 564 | R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); | ||
| 565 | |||
| 566 | R_SUCCEED(); | ||
| 567 | } | ||
| 568 | |||
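All three checks above are the same masked-equality idiom: clear the bits the caller does not care about, then require the remainder to match exactly. Depending on the mask, one helper expresses "must be exactly X", "must include flag Y", or "ignore this field entirely". A tiny sketch of the idiom with illustrative flags (not yuzu's enums):

    #include <cstdint>
    #include <cstdio>

    enum Perm : std::uint32_t {
        Perm_Read = 1u << 0,
        Perm_Write = 1u << 1,
        Perm_Execute = 1u << 2,
    };

    // The CheckMemoryState pattern: (actual & mask) must equal expected.
    bool CheckMasked(std::uint32_t actual, std::uint32_t mask, std::uint32_t expected) {
        return (actual & mask) == expected;
    }

    int main() {
        const std::uint32_t perm = Perm_Read | Perm_Write;
        // Require read+write, ignoring the execute bit.
        std::printf("%d\n", CheckMasked(perm, Perm_Read | Perm_Write, Perm_Read | Perm_Write)); // 1
        // Require exactly read-only across all three bits: fails, write is set.
        std::printf("%d\n", CheckMasked(perm, Perm_Read | Perm_Write | Perm_Execute, Perm_Read)); // 0
    }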
| 569 | Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, | ||
| 570 | size_t size, KMemoryState state_mask, | ||
| 571 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 572 | KMemoryPermission perm, | ||
| 573 | KMemoryAttribute attr_mask, | ||
| 574 | KMemoryAttribute attr) const { | ||
| 575 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 576 | |||
| 577 | // Get information about the first block. | ||
| 578 | const KProcessAddress last_addr = addr + size - 1; | ||
| 579 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 580 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 581 | |||
| 582 | // If the start address isn't aligned, we need a block. | ||
| 583 | const size_t blocks_for_start_align = | ||
| 584 | (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0; | ||
| 585 | |||
| 586 | while (true) { | ||
| 587 | // Validate against the provided masks. | ||
| 588 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 589 | |||
| 590 | // Break once we're done. | ||
| 591 | if (last_addr <= info.GetLastAddress()) { | ||
| 592 | break; | ||
| 593 | } | ||
| 594 | |||
| 595 | // Advance our iterator. | ||
| 596 | it++; | ||
| 597 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 598 | info = it->GetMemoryInfo(); | ||
| 599 | } | ||
| 600 | |||
| 601 | // If the end address isn't aligned, we need a block. | ||
| 602 | const size_t blocks_for_end_align = | ||
| 603 | (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0; | ||
| 604 | |||
| 605 | if (out_blocks_needed != nullptr) { | ||
| 606 | *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; | ||
| 607 | } | ||
| 608 | |||
| 609 | R_SUCCEED(); | ||
| 610 | } | ||
| 611 | |||
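The two blocks_for_*_align terms count the extra block descriptors an update will need: a split is required whenever the checked range begins in the middle of the first block it touches or ends in the middle of the last one. A compact sketch of the same counting over hypothetical block boundaries:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t kPageSize = 0x1000;

    constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) { return v & ~(a - 1); }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) { return AlignDown(v + a - 1, a); }

    // first_block_start/last_block_end describe the existing blocks the range touches.
    std::size_t BlocksNeeded(std::uint64_t addr, std::uint64_t size,
                             std::uint64_t first_block_start, std::uint64_t last_block_end) {
        const std::size_t start_split = AlignDown(addr, kPageSize) != first_block_start ? 1 : 0;
        const std::size_t end_split = AlignUp(addr + size, kPageSize) != last_block_end ? 1 : 0;
        return start_split + end_split;
    }

    int main() {
        // Starts mid-block, ends exactly on the block's end: one split needed.
        std::printf("%zu\n", BlocksNeeded(0x5000, 0x3000, 0x4000, 0x8000)); // 1
        // Exactly covers the blocks on both sides: no splits needed.
        std::printf("%zu\n", BlocksNeeded(0x4000, 0x4000, 0x4000, 0x8000)); // 0
    }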
| 612 | Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 613 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 614 | KMemoryBlockManager::const_iterator it, | ||
| 615 | KProcessAddress last_addr, KMemoryState state_mask, | ||
| 616 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 617 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 618 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 619 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 620 | |||
| 621 | // Get information about the first block. | ||
| 622 | KMemoryInfo info = it->GetMemoryInfo(); | ||
| 623 | |||
| 624 | // Validate all blocks in the range have correct state. | ||
| 625 | const KMemoryState first_state = info.m_state; | ||
| 626 | const KMemoryPermission first_perm = info.m_permission; | ||
| 627 | const KMemoryAttribute first_attr = info.m_attribute; | ||
| 628 | while (true) { | ||
| 629 | // Validate the current block. | ||
| 630 | R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); | ||
| 631 | R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); | ||
| 632 | R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), | ||
| 633 | ResultInvalidCurrentMemory); | ||
| 634 | |||
| 635 | // Validate against the provided masks. | ||
| 636 | R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); | ||
| 637 | |||
| 638 | // Break once we're done. | ||
| 639 | if (last_addr <= info.GetLastAddress()) { | ||
| 640 | break; | ||
| 641 | } | ||
| 642 | |||
| 643 | // Advance our iterator. | ||
| 644 | it++; | ||
| 645 | ASSERT(it != m_memory_block_manager.cend()); | ||
| 646 | info = it->GetMemoryInfo(); | ||
| 647 | } | ||
| 648 | |||
| 649 | // Write output state. | ||
| 650 | if (out_state != nullptr) { | ||
| 651 | *out_state = first_state; | ||
| 652 | } | ||
| 653 | if (out_perm != nullptr) { | ||
| 654 | *out_perm = first_perm; | ||
| 655 | } | ||
| 656 | if (out_attr != nullptr) { | ||
| 657 | *out_attr = first_attr & ~ignore_attr; | ||
| 658 | } | ||
| 659 | |||
| 660 | // If the end address isn't aligned, we need a block. | ||
| 661 | if (out_blocks_needed != nullptr) { | ||
| 662 | const size_t blocks_for_end_align = | ||
| 663 | (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) | ||
| 664 | ? 1 | ||
| 665 | : 0; | ||
| 666 | *out_blocks_needed = blocks_for_end_align; | ||
| 667 | } | ||
| 668 | |||
| 669 | R_SUCCEED(); | ||
| 670 | } | ||
| 671 | |||
| 672 | Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 673 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 674 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 675 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 676 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 677 | KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { | ||
| 678 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 679 | |||
| 680 | // Check memory state. | ||
| 681 | const KProcessAddress last_addr = addr + size - 1; | ||
| 682 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); | ||
| 683 | R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, | ||
| 684 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr)); | ||
| 685 | |||
| 686 | // If the start address isn't aligned, we need a block. | ||
| 687 | if (out_blocks_needed != nullptr && | ||
| 688 | Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) { | ||
| 689 | ++(*out_blocks_needed); | ||
| 690 | } | ||
| 691 | |||
| 692 | R_SUCCEED(); | ||
| 693 | } | ||
| 694 | |||
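The three overloads layer on one another: the single-info overload validates one block, the iterator overload walks blocks up to last_addr while requiring uniform state/permission/attribute and counting the end split, and this address/size overload adds the start split on top. A reduced, self-contained model of that walk over a toy block list (not yuzu's KMemoryBlockManager):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Each toy block has an exclusive end address and a state tag.
    struct Block {
        std::uint64_t end;
        int state;
    };

    // True when every block overlapping [addr, addr + size) has `state`.
    bool CheckUniformState(const std::vector<Block>& blocks, std::uint64_t addr,
                           std::uint64_t size, int state) {
        const std::uint64_t last = addr + size - 1;
        std::uint64_t start = 0;
        for (const Block& b : blocks) {
            const bool overlaps = addr < b.end && start <= last;
            if (overlaps && b.state != state) {
                return false;
            }
            if (last < b.end) {
                break; // walked past the range, like "last_addr <= info.GetLastAddress()"
            }
            start = b.end;
        }
        return true;
    }

    int main() {
        const std::vector<Block> blocks{{0x4000, 1}, {0x8000, 1}, {0xC000, 2}};
        std::printf("%d\n", CheckUniformState(blocks, 0x2000, 0x4000, 1)); // 1
        std::printf("%d\n", CheckUniformState(blocks, 0x6000, 0x4000, 1)); // 0: crosses state 2
    }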
| 695 | Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, | ||
| 696 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 697 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 698 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 699 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 700 | KMemoryAttribute lock_attr) { | ||
| 701 | // Validate basic preconditions. | ||
| 702 | ASSERT(False(lock_attr & attr)); | ||
| 703 | ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); | ||
| 704 | |||
| 705 | // Validate the lock request. | ||
| 706 | const size_t num_pages = size / PageSize; | ||
| 707 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 708 | |||
| 709 | // Lock the table. | ||
| 710 | KScopedLightLock lk(m_general_lock); | ||
| 711 | |||
| 712 | // Check that the output page group is empty, if it exists. | ||
| 713 | if (out_pg) { | ||
| 714 | ASSERT(out_pg->GetNumPages() == 0); | ||
| 715 | } | ||
| 716 | |||
| 717 | // Check the state. | ||
| 718 | KMemoryState old_state; | ||
| 719 | KMemoryPermission old_perm; | ||
| 720 | KMemoryAttribute old_attr; | ||
| 721 | size_t num_allocator_blocks; | ||
| 722 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 723 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 724 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 725 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 726 | attr_mask, attr)); | ||
| 727 | |||
| 728 | // Get the physical address, if we're supposed to. | ||
| 729 | if (out_paddr != nullptr) { | ||
| 730 | ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr)); | ||
| 731 | } | ||
| 732 | |||
| 733 | // Make the page group, if we're supposed to. | ||
| 734 | if (out_pg != nullptr) { | ||
| 735 | R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); | ||
| 736 | } | ||
| 737 | |||
| 738 | // Create an update allocator. | ||
| 739 | Result allocator_result; | ||
| 740 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 741 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 742 | R_TRY(allocator_result); | ||
| 743 | |||
| 744 | // Decide on new perm and attr. | ||
| 745 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 746 | KMemoryAttribute new_attr = old_attr | static_cast<KMemoryAttribute>(lock_attr); | ||
| 747 | |||
| 748 | // Update permission, if we need to. | ||
| 749 | if (new_perm != old_perm) { | ||
| 750 | // We're going to perform an update, so create a helper. | ||
| 751 | KScopedPageTableUpdater updater(this); | ||
| 752 | |||
| 753 | const KPageProperties properties = {new_perm, false, | ||
| 754 | True(old_attr & KMemoryAttribute::Uncached), | ||
| 755 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 756 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 757 | OperationType::ChangePermissions, false)); | ||
| 758 | } | ||
| 759 | |||
| 760 | // Apply the memory block updates. | ||
| 761 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 762 | new_attr, KMemoryBlockDisableMergeAttribute::Locked, | ||
| 763 | KMemoryBlockDisableMergeAttribute::None); | ||
| 764 | |||
| 765 | // If we have an output group, open it. | ||
| 766 | if (out_pg) { | ||
| 767 | out_pg->Open(); | ||
| 768 | } | ||
| 769 | |||
| 770 | R_SUCCEED(); | ||
| 771 | } | ||
| 772 | |||
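Two small conventions in LockMemoryAndOpen are worth calling out: KMemoryPermission::None acts as a sentinel for "keep the old permission", and locking simply ORs the lock attribute in, with UnlockMemory below clearing the same bits. A tiny sketch of both conventions with illustrative values:

    #include <cstdint>
    #include <cstdio>

    enum class Perm : std::uint8_t { None = 0, Read = 1, ReadWrite = 3 };
    enum Attr : std::uint8_t { Attr_Locked = 1 << 0 };

    // None is a sentinel meaning "leave the permission unchanged".
    Perm ResolveNewPerm(Perm requested, Perm old_perm) {
        return requested != Perm::None ? requested : old_perm;
    }

    int main() {
        std::printf("%d\n", static_cast<int>(ResolveNewPerm(Perm::Read, Perm::ReadWrite))); // 1
        std::printf("%d\n", static_cast<int>(ResolveNewPerm(Perm::None, Perm::ReadWrite))); // 3

        std::uint8_t attr = 0;
        attr |= Attr_Locked; // lock: OR the attribute in
        std::printf("locked=%d\n", static_cast<int>(attr)); // 1
        attr &= static_cast<std::uint8_t>(~Attr_Locked); // unlock: clear the same bit
        std::printf("unlocked=%d\n", static_cast<int>(attr)); // 0
    }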
| 773 | Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 774 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 775 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 776 | KMemoryAttribute attr, KMemoryPermission new_perm, | ||
| 777 | KMemoryAttribute lock_attr, const KPageGroup* pg) { | ||
| 778 | // Validate basic preconditions. | ||
| 779 | ASSERT((attr_mask & lock_attr) == lock_attr); | ||
| 780 | ASSERT((attr & lock_attr) == lock_attr); | ||
| 781 | |||
| 782 | // Validate the unlock request. | ||
| 783 | const size_t num_pages = size / PageSize; | ||
| 784 | R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); | ||
| 785 | |||
| 786 | // Lock the table. | ||
| 787 | KScopedLightLock lk(m_general_lock); | ||
| 788 | |||
| 789 | // Check the state. | ||
| 790 | KMemoryState old_state; | ||
| 791 | KMemoryPermission old_perm; | ||
| 792 | KMemoryAttribute old_attr; | ||
| 793 | size_t num_allocator_blocks; | ||
| 794 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 795 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 796 | addr, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 797 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 798 | attr_mask, attr)); | ||
| 799 | |||
| 800 | // Check the page group. | ||
| 801 | if (pg != nullptr) { | ||
| 802 | R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion); | ||
| 803 | } | ||
| 804 | |||
| 805 | // Decide on new perm and attr. | ||
| 806 | new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; | ||
| 807 | KMemoryAttribute new_attr = old_attr & ~static_cast<KMemoryAttribute>(lock_attr); | ||
| 808 | |||
| 809 | // Create an update allocator. | ||
| 810 | Result allocator_result; | ||
| 811 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 812 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 813 | R_TRY(allocator_result); | ||
| 814 | |||
| 815 | // Update permission, if we need to. | ||
| 816 | if (new_perm != old_perm) { | ||
| 817 | // We're going to perform an update, so create a helper. | ||
| 818 | KScopedPageTableUpdater updater(this); | ||
| 819 | |||
| 820 | const KPageProperties properties = {new_perm, false, | ||
| 821 | True(old_attr & KMemoryAttribute::Uncached), | ||
| 822 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 823 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 824 | OperationType::ChangePermissions, false)); | ||
| 825 | } | ||
| 826 | |||
| 827 | // Apply the memory block updates. | ||
| 828 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 829 | new_attr, KMemoryBlockDisableMergeAttribute::None, | ||
| 830 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 831 | |||
| 832 | R_SUCCEED(); | ||
| 833 | } | ||
| 834 | |||
| 835 | Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page, | ||
| 836 | KProcessAddress address) const { | ||
| 837 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 838 | ASSERT(out_info != nullptr); | ||
| 839 | ASSERT(out_page != nullptr); | ||
| 840 | |||
| 841 | const KMemoryBlock* block = m_memory_block_manager.FindBlock(address); | ||
| 842 | R_UNLESS(block != nullptr, ResultInvalidCurrentMemory); | ||
| 843 | |||
| 844 | *out_info = block->GetMemoryInfo(); | ||
| 845 | out_page->flags = 0; | ||
| 846 | R_SUCCEED(); | ||
| 847 | } | ||
| 848 | |||
| 849 | Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size, | ||
| 850 | Svc::MemoryState state) const { | ||
| 851 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 852 | ASSERT(out != nullptr); | ||
| 853 | |||
| 854 | const KProcessAddress region_start = this->GetRegionAddress(state); | ||
| 855 | const size_t region_size = this->GetRegionSize(state); | ||
| 856 | |||
| 857 | // Check that the address/size are potentially valid. | ||
| 858 | R_UNLESS((address < address + size), ResultNotFound); | ||
| 859 | |||
| 860 | // Lock the table. | ||
| 861 | KScopedLightLock lk(m_general_lock); | ||
| 862 | |||
| 863 | auto& impl = this->GetImpl(); | ||
| 864 | |||
| 865 | // Begin traversal. | ||
| 866 | TraversalContext context; | ||
| 867 | TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 868 | bool cur_valid = false; | ||
| 869 | TraversalEntry next_entry; | ||
| 870 | bool next_valid; | ||
| 871 | size_t tot_size = 0; | ||
| 872 | |||
| 873 | next_valid = | ||
| 874 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start); | ||
| 875 | next_entry.block_size = | ||
| 876 | (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1))); | ||
| 877 | |||
| 878 | // Iterate, looking for the entry. | ||
| 879 | while (true) { | ||
| 880 | if ((!next_valid && !cur_valid) || | ||
| 881 | (next_valid && cur_valid && | ||
| 882 | next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { | ||
| 883 | cur_entry.block_size += next_entry.block_size; | ||
| 884 | } else { | ||
| 885 | if (cur_valid && cur_entry.phys_addr <= address && | ||
| 886 | address + size <= cur_entry.phys_addr + cur_entry.block_size) { | ||
| 887 | // Check if this region is valid. | ||
| 888 | const KProcessAddress mapped_address = | ||
| 889 | (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr); | ||
| 890 | if (R_SUCCEEDED(this->CheckMemoryState( | ||
| 891 | mapped_address, size, KMemoryState::Mask, static_cast<KMemoryState>(state), | ||
| 892 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, | ||
| 893 | KMemoryAttribute::None, KMemoryAttribute::None))) { | ||
| 894 | // It is! | ||
| 895 | *out = mapped_address; | ||
| 896 | R_SUCCEED(); | ||
| 897 | } | ||
| 898 | } | ||
| 899 | |||
| 900 | // Update tracking variables. | ||
| 901 | tot_size += cur_entry.block_size; | ||
| 902 | cur_entry = next_entry; | ||
| 903 | cur_valid = next_valid; | ||
| 904 | } | ||
| 905 | |||
| 906 | if (cur_entry.block_size + tot_size >= region_size) { | ||
| 907 | break; | ||
| 908 | } | ||
| 909 | |||
| 910 | next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 911 | } | ||
| 912 | |||
| 913 | // Check the last entry. | ||
| 914 | R_UNLESS(cur_valid, ResultNotFound); | ||
| 915 | R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound); | ||
| 916 | R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound); | ||
| 917 | |||
| 918 | // Check if the last region is valid. | ||
| 919 | const KProcessAddress mapped_address = | ||
| 920 | (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr); | ||
| 921 | R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::All, | ||
| 922 | static_cast<KMemoryState>(state), | ||
| 923 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, | ||
| 924 | KMemoryAttribute::None, KMemoryAttribute::None)) { | ||
| 925 | R_CONVERT_ALL(ResultNotFound); | ||
| 926 | } | ||
| 927 | R_END_TRY_CATCH; | ||
| 928 | |||
| 929 | // We found the region. | ||
| 930 | *out = mapped_address; | ||
| 931 | R_SUCCEED(); | ||
| 932 | } | ||
| 933 | |||
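The traversal loop above coalesces physically contiguous mappings: while the next entry begins exactly at cur_entry.phys_addr + cur_entry.block_size, it only grows the current run; otherwise it tests the accumulated run against the queried physical range and starts a new run. A reduced sketch of that run-merging over a plain array (addresses are hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Entry {
        std::uint64_t phys;
        std::uint64_t size;
    };

    // Merge physically contiguous entries into maximal runs, as the
    // QueryMappingImpl loop does, and print each run.
    int main() {
        const std::vector<Entry> entries{
            {0x80000000, 0x1000}, {0x80001000, 0x1000}, // contiguous pair: one run
            {0x90000000, 0x2000},                       // separate run
        };

        Entry cur = entries.front();
        for (std::size_t i = 1; i < entries.size(); ++i) {
            const Entry& next = entries[i];
            if (next.phys == cur.phys + cur.size) {
                cur.size += next.size; // extend the current run
            } else {
                std::printf("run: phys=%#llx size=%#llx\n",
                            (unsigned long long)cur.phys, (unsigned long long)cur.size);
                cur = next; // start a new run
            }
        }
        std::printf("run: phys=%#llx size=%#llx\n",
                    (unsigned long long)cur.phys, (unsigned long long)cur.size);
    }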
| 934 | Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 935 | size_t size) { | ||
| 936 | // Lock the table. | ||
| 937 | KScopedLightLock lk(m_general_lock); | ||
| 938 | |||
| 939 | // Validate that the source address's state is valid. | ||
| 940 | KMemoryState src_state; | ||
| 941 | size_t num_src_allocator_blocks; | ||
| 942 | R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, | ||
| 943 | std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 944 | KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 945 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 946 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 947 | |||
| 948 | // Validate that the dst address's state is valid. | ||
| 949 | size_t num_dst_allocator_blocks; | ||
| 950 | R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, | ||
| 951 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 952 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 953 | KMemoryAttribute::None)); | ||
| 954 | |||
| 955 | // Create an update allocator for the source. | ||
| 956 | Result src_allocator_result; | ||
| 957 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 958 | m_memory_block_slab_manager, | ||
| 959 | num_src_allocator_blocks); | ||
| 960 | R_TRY(src_allocator_result); | ||
| 961 | |||
| 962 | // Create an update allocator for the destination. | ||
| 963 | Result dst_allocator_result; | ||
| 964 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 965 | m_memory_block_slab_manager, | ||
| 966 | num_dst_allocator_blocks); | ||
| 967 | R_TRY(dst_allocator_result); | ||
| 968 | |||
| 969 | // Map the memory. | ||
| 970 | { | ||
| 971 | // Determine the number of pages being operated on. | ||
| 972 | const size_t num_pages = size / PageSize; | ||
| 973 | |||
| 974 | // Create a page group for the memory being mapped. | ||
| 975 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 976 | |||
| 977 | // Create the page group representing the source. | ||
| 978 | R_TRY(this->MakePageGroup(pg, src_address, num_pages)); | ||
| 979 | |||
| 980 | // We're going to perform an update, so create a helper. | ||
| 981 | KScopedPageTableUpdater updater(this); | ||
| 982 | |||
| 983 | // Reprotect the source as kernel-read/not mapped. | ||
| 984 | const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( | ||
| 985 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||
| 986 | const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; | ||
| 987 | const KPageProperties src_properties = {new_src_perm, false, false, | ||
| 988 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 989 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 990 | OperationType::ChangePermissions, false)); | ||
| 991 | |||
| 992 | // Ensure that we unprotect the source pages on failure. | ||
| 993 | ON_RESULT_FAILURE { | ||
| 994 | const KPageProperties unprotect_properties = { | ||
| 995 | KMemoryPermission::UserReadWrite, false, false, | ||
| 996 | DisableMergeAttribute::EnableHeadBodyTail}; | ||
| 997 | R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, | ||
| 998 | unprotect_properties, OperationType::ChangePermissions, true)); | ||
| 999 | }; | ||
| 1000 | |||
| 1001 | // Map the alias pages. | ||
| 1002 | const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1003 | DisableMergeAttribute::DisableHead}; | ||
| 1004 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, | ||
| 1005 | false)); | ||
| 1006 | |||
| 1007 | // Apply the memory block updates. | ||
| 1008 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 1009 | src_state, new_src_perm, new_src_attr, | ||
| 1010 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 1011 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1012 | m_memory_block_manager.Update( | ||
| 1013 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack, | ||
| 1014 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1015 | KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | R_SUCCEED(); | ||
| 1019 | } | ||
| 1020 | |||
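MapMemory above (and MapCodeMemory below) follows a transactional shape: reprotect the source, register an ON_RESULT_FAILURE rollback that restores the source permissions, attempt the alias mapping, and only then apply the block-manager updates, which cannot fail. A generic, self-contained sketch of such a dismissable rollback guard; this is not yuzu's ON_RESULT_FAILURE macro, just the same idea:

    #include <cstdio>
    #include <functional>
    #include <utility>

    // Runs its action on scope exit unless dismissed.
    class Rollback {
    public:
        explicit Rollback(std::function<void()> action) : m_action(std::move(action)) {}
        ~Rollback() {
            if (m_armed) {
                m_action();
            }
        }
        void Dismiss() { m_armed = false; }

    private:
        std::function<void()> m_action;
        bool m_armed = true;
    };

    bool ReprotectSource() { return true; }
    bool MapAlias(bool fail) { return !fail; }

    bool MapWithRollback(bool inject_failure) {
        if (!ReprotectSource()) return false;
        Rollback undo([] { std::puts("rollback: restoring source permissions"); });
        if (!MapAlias(inject_failure)) return false; // undo fires on this path
        undo.Dismiss();                              // success: keep the new state
        return true;
    }

    int main() {
        std::printf("ok=%d\n", MapWithRollback(false)); // ok=1, no rollback
        std::printf("ok=%d\n", MapWithRollback(true));  // rollback runs, ok=0
    }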
| 1021 | Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1022 | size_t size) { | ||
| 1023 | // Lock the table. | ||
| 1024 | KScopedLightLock lk(m_general_lock); | ||
| 1025 | |||
| 1026 | // Validate that the source address's state is valid. | ||
| 1027 | KMemoryState src_state; | ||
| 1028 | size_t num_src_allocator_blocks; | ||
| 1029 | R_TRY(this->CheckMemoryState( | ||
| 1030 | std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), | ||
| 1031 | src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, | ||
| 1032 | KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, | ||
| 1033 | KMemoryAttribute::All, KMemoryAttribute::Locked)); | ||
| 1034 | |||
| 1035 | // Validate that the dst address's state is valid. | ||
| 1036 | KMemoryPermission dst_perm; | ||
| 1037 | size_t num_dst_allocator_blocks; | ||
| 1038 | R_TRY(this->CheckMemoryState( | ||
| 1039 | nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), | ||
| 1040 | dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, | ||
| 1041 | KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1042 | |||
| 1043 | // Create an update allocator for the source. | ||
| 1044 | Result src_allocator_result; | ||
| 1045 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1046 | m_memory_block_slab_manager, | ||
| 1047 | num_src_allocator_blocks); | ||
| 1048 | R_TRY(src_allocator_result); | ||
| 1049 | |||
| 1050 | // Create an update allocator for the destination. | ||
| 1051 | Result dst_allocator_result; | ||
| 1052 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1053 | m_memory_block_slab_manager, | ||
| 1054 | num_dst_allocator_blocks); | ||
| 1055 | R_TRY(dst_allocator_result); | ||
| 1056 | |||
| 1057 | // Unmap the memory. | ||
| 1058 | { | ||
| 1059 | // Determine the number of pages being operated on. | ||
| 1060 | const size_t num_pages = size / PageSize; | ||
| 1061 | |||
| 1062 | // Create a page group for the memory being unmapped. | ||
| 1063 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1064 | |||
| 1065 | // Create the page group representing the destination. | ||
| 1066 | R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); | ||
| 1067 | |||
| 1068 | // Ensure the page group is valid for the source. | ||
| 1069 | R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); | ||
| 1070 | |||
| 1071 | // We're going to perform an update, so create a helper. | ||
| 1072 | KScopedPageTableUpdater updater(this); | ||
| 1073 | |||
| 1074 | // Unmap the aliased copy of the pages. | ||
| 1075 | const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1076 | DisableMergeAttribute::None}; | ||
| 1077 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, | ||
| 1078 | dst_unmap_properties, OperationType::Unmap, false)); | ||
| 1079 | |||
| 1080 | // Ensure that we re-map the aliased pages on failure. | ||
| 1081 | ON_RESULT_FAILURE { | ||
| 1082 | this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); | ||
| 1083 | }; | ||
| 1084 | |||
| 1085 | // Try to set the permissions for the source pages back to what they should be. | ||
| 1086 | const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1087 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 1088 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 1089 | OperationType::ChangePermissions, false)); | ||
| 1090 | |||
| 1091 | // Apply the memory block updates. | ||
| 1092 | m_memory_block_manager.Update( | ||
| 1093 | std::addressof(src_allocator), src_address, num_pages, src_state, | ||
| 1094 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1095 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 1096 | m_memory_block_manager.Update( | ||
| 1097 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 1098 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1099 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | R_SUCCEED(); | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1106 | size_t size) { | ||
| 1107 | // Validate the mapping request. | ||
| 1108 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 1109 | ResultInvalidMemoryRegion); | ||
| 1110 | |||
| 1111 | // Lock the table. | ||
| 1112 | KScopedLightLock lk(m_general_lock); | ||
| 1113 | |||
| 1114 | // Verify that the source memory is normal heap. | ||
| 1115 | KMemoryState src_state; | ||
| 1116 | KMemoryPermission src_perm; | ||
| 1117 | size_t num_src_allocator_blocks; | ||
| 1118 | R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr, | ||
| 1119 | std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 1120 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, | ||
| 1121 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 1122 | KMemoryAttribute::None)); | ||
| 1123 | |||
| 1124 | // Verify that the destination memory is unmapped. | ||
| 1125 | size_t num_dst_allocator_blocks; | ||
| 1126 | R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, | ||
| 1127 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 1128 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1129 | KMemoryAttribute::None)); | ||
| 1130 | |||
| 1131 | // Create an update allocator for the source. | ||
| 1132 | Result src_allocator_result; | ||
| 1133 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1134 | m_memory_block_slab_manager, | ||
| 1135 | num_src_allocator_blocks); | ||
| 1136 | R_TRY(src_allocator_result); | ||
| 1137 | |||
| 1138 | // Create an update allocator for the destination. | ||
| 1139 | Result dst_allocator_result; | ||
| 1140 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1141 | m_memory_block_slab_manager, | ||
| 1142 | num_dst_allocator_blocks); | ||
| 1143 | R_TRY(dst_allocator_result); | ||
| 1144 | |||
| 1145 | // Map the code memory. | ||
| 1146 | { | ||
| 1147 | // Determine the number of pages being operated on. | ||
| 1148 | const size_t num_pages = size / PageSize; | ||
| 1149 | |||
| 1150 | // Create a page group for the memory being mapped. | ||
| 1151 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1152 | |||
| 1153 | // Create the page group representing the source. | ||
| 1154 | R_TRY(this->MakePageGroup(pg, src_address, num_pages)); | ||
| 1155 | |||
| 1156 | // We're going to perform an update, so create a helper. | ||
| 1157 | KScopedPageTableUpdater updater(this); | ||
| 1158 | |||
| 1159 | // Reprotect the source as kernel-read/not mapped. | ||
| 1160 | const KMemoryPermission new_perm = static_cast<KMemoryPermission>( | ||
| 1161 | KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); | ||
| 1162 | const KPageProperties src_properties = {new_perm, false, false, | ||
| 1163 | DisableMergeAttribute::DisableHeadBodyTail}; | ||
| 1164 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 1165 | OperationType::ChangePermissions, false)); | ||
| 1166 | |||
| 1167 | // Ensure that we unprotect the source pages on failure. | ||
| 1168 | ON_RESULT_FAILURE { | ||
| 1169 | const KPageProperties unprotect_properties = { | ||
| 1170 | src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail}; | ||
| 1171 | R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, | ||
| 1172 | unprotect_properties, OperationType::ChangePermissions, true)); | ||
| 1173 | }; | ||
| 1174 | |||
| 1175 | // Map the alias pages. | ||
| 1176 | const KPageProperties dst_properties = {new_perm, false, false, | ||
| 1177 | DisableMergeAttribute::DisableHead}; | ||
| 1178 | R_TRY( | ||
| 1179 | this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false)); | ||
| 1180 | |||
| 1181 | // Apply the memory block updates. | ||
| 1182 | m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, | ||
| 1183 | src_state, new_perm, KMemoryAttribute::Locked, | ||
| 1184 | KMemoryBlockDisableMergeAttribute::Locked, | ||
| 1185 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1186 | m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, | ||
| 1187 | KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, | ||
| 1188 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1189 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | R_SUCCEED(); | ||
| 1193 | } | ||
| 1194 | |||
| 1195 | Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 1196 | size_t size) { | ||
| 1197 | // Validate the mapping request. | ||
| 1198 | R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), | ||
| 1199 | ResultInvalidMemoryRegion); | ||
| 1200 | |||
| 1201 | // Lock the table. | ||
| 1202 | KScopedLightLock lk(m_general_lock); | ||
| 1203 | |||
| 1204 | // Verify that the source memory is locked normal heap. | ||
| 1205 | size_t num_src_allocator_blocks; | ||
| 1206 | R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, | ||
| 1207 | KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, | ||
| 1208 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 1209 | KMemoryAttribute::Locked)); | ||
| 1210 | |||
| 1211 | // Verify that the destination memory is aliasable code. | ||
| 1212 | size_t num_dst_allocator_blocks; | ||
| 1213 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 1214 | std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, | ||
| 1215 | KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, | ||
| 1216 | KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None)); | ||
| 1217 | |||
| 1218 | // Determine whether any pages being unmapped are code. | ||
| 1219 | bool any_code_pages = false; | ||
| 1220 | { | ||
| 1221 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); | ||
| 1222 | while (true) { | ||
| 1223 | // Get the memory info. | ||
| 1224 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1225 | |||
| 1226 | // Check if the memory has the code flag. | ||
| 1227 | if (True(info.GetState() & KMemoryState::FlagCode)) { | ||
| 1228 | any_code_pages = true; | ||
| 1229 | break; | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | // Check if we're done. | ||
| 1233 | if (dst_address + size - 1 <= info.GetLastAddress()) { | ||
| 1234 | break; | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | // Advance. | ||
| 1238 | ++it; | ||
| 1239 | } | ||
| 1240 | } | ||
| 1241 | |||
| 1242 | // Ensure that we maintain the instruction cache. | ||
| 1243 | bool reprotected_pages = false; | ||
| 1244 | SCOPE_EXIT({ | ||
| 1245 | if (reprotected_pages && any_code_pages) { | ||
| 1246 | InvalidateInstructionCache(m_system, dst_address, size); | ||
| 1247 | } | ||
| 1248 | }); | ||
| 1249 | |||
| 1250 | // Unmap. | ||
| 1251 | { | ||
| 1252 | // Determine the number of pages being operated on. | ||
| 1253 | const size_t num_pages = size / PageSize; | ||
| 1254 | |||
| 1255 | // Create a page group for the memory being unmapped. | ||
| 1256 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1257 | |||
| 1258 | // Create the page group representing the destination. | ||
| 1259 | R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); | ||
| 1260 | |||
| 1261 | // Verify that the page group contains the same pages as the source. | ||
| 1262 | R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion); | ||
| 1263 | |||
| 1264 | // Create an update allocator for the source. | ||
| 1265 | Result src_allocator_result; | ||
| 1266 | KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), | ||
| 1267 | m_memory_block_slab_manager, | ||
| 1268 | num_src_allocator_blocks); | ||
| 1269 | R_TRY(src_allocator_result); | ||
| 1270 | |||
| 1271 | // Create an update allocator for the destination. | ||
| 1272 | Result dst_allocator_result; | ||
| 1273 | KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), | ||
| 1274 | m_memory_block_slab_manager, | ||
| 1275 | num_dst_allocator_blocks); | ||
| 1276 | R_TRY(dst_allocator_result); | ||
| 1277 | |||
| 1278 | // We're going to perform an update, so create a helper. | ||
| 1279 | KScopedPageTableUpdater updater(this); | ||
| 1280 | |||
| 1281 | // Unmap the aliased copy of the pages. | ||
| 1282 | const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1283 | DisableMergeAttribute::None}; | ||
| 1284 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, | ||
| 1285 | dst_unmap_properties, OperationType::Unmap, false)); | ||
| 1286 | |||
| 1287 | // Ensure that we re-map the aliased pages on failure. | ||
| 1288 | ON_RESULT_FAILURE { | ||
| 1289 | this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg); | ||
| 1290 | }; | ||
| 1291 | |||
| 1292 | // Try to set the permissions for the source pages back to what they should be. | ||
| 1293 | const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1294 | DisableMergeAttribute::EnableAndMergeHeadBodyTail}; | ||
| 1295 | R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties, | ||
| 1296 | OperationType::ChangePermissions, false)); | ||
| 1297 | |||
| 1298 | // Apply the memory block updates. | ||
| 1299 | m_memory_block_manager.Update( | ||
| 1300 | std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, | ||
| 1301 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1302 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1303 | m_memory_block_manager.Update( | ||
| 1304 | std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, | ||
| 1305 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 1306 | KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); | ||
| 1307 | |||
| 1308 | // Note that we reprotected pages. | ||
| 1309 | reprotected_pages = true; | ||
| 1310 | } | ||
| 1311 | |||
| 1312 | R_SUCCEED(); | ||
| 1313 | } | ||
| 1314 | |||
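Note how UnmapCodeMemory declares its SCOPE_EXIT before doing any work and arms it via reprotected_pages only as the very last step, so the instruction cache is invalidated exactly when executable pages were actually torn down, on every exit path. A minimal stand-in for that "always-run guard, conditionally armed" idiom:

    #include <cstdio>

    // Minimal always-run scope guard; a stand-in for the SCOPE_EXIT macro above.
    template <typename F>
    struct ScopeExit {
        F f;
        ~ScopeExit() { f(); }
    };
    template <typename F>
    ScopeExit(F) -> ScopeExit<F>;

    int main() {
        bool reprotected_pages = false;
        const bool any_code_pages = true;
        {
            ScopeExit guard{[&] {
                // Runs on every exit path; the body fires only if both flags
                // were set, mirroring the cache invalidation above.
                if (reprotected_pages && any_code_pages) {
                    std::puts("invalidate instruction cache");
                }
            }};
            // ... unmap work; arming the flag is the final step on success ...
            reprotected_pages = true;
        }
    }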
| 1315 | Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 1316 | // Get the insecure memory resource limit and pool. | ||
| 1317 | auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); | ||
| 1318 | const auto insecure_pool = | ||
| 1319 | static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool()); | ||
| 1320 | |||
| 1321 | // Reserve the insecure memory. | ||
| 1322 | // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached. | ||
| 1323 | KScopedResourceReservation memory_reservation(insecure_resource_limit, | ||
| 1324 | Svc::LimitableResource::PhysicalMemoryMax, size); | ||
| 1325 | R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory); | ||
| 1326 | |||
| 1327 | // Allocate pages for the insecure memory. | ||
| 1328 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1329 | R_TRY(m_kernel.MemoryManager().AllocateAndOpen( | ||
| 1330 | std::addressof(pg), size / PageSize, | ||
| 1331 | KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront))); | ||
| 1332 | |||
| 1333 | // Close the opened pages when we're done with them. | ||
| 1334 | // If the mapping succeeds, each page will gain an extra reference; otherwise, they will be | ||
| 1335 | // freed automatically. | ||
| 1336 | SCOPE_EXIT({ pg.Close(); }); | ||
| 1337 | |||
| 1338 | // Clear all the newly allocated pages. | ||
| 1339 | for (const auto& it : pg) { | ||
| 1340 | std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), | ||
| 1341 | static_cast<u32>(m_heap_fill_value), it.GetSize()); | ||
| 1342 | } | ||
| 1343 | |||
| 1344 | // Lock the table. | ||
| 1345 | KScopedLightLock lk(m_general_lock); | ||
| 1346 | |||
| 1347 | // Validate that the address's state is valid. | ||
| 1348 | size_t num_allocator_blocks; | ||
| 1349 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1350 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 1351 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1352 | KMemoryAttribute::None)); | ||
| 1353 | |||
| 1354 | // Create an update allocator. | ||
| 1355 | Result allocator_result; | ||
| 1356 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1357 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1358 | R_TRY(allocator_result); | ||
| 1359 | |||
| 1360 | // We're going to perform an update, so create a helper. | ||
| 1361 | KScopedPageTableUpdater updater(this); | ||
| 1362 | |||
| 1363 | // Map the pages. | ||
| 1364 | const size_t num_pages = size / PageSize; | ||
| 1365 | const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 1366 | DisableMergeAttribute::DisableHead}; | ||
| 1367 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, | ||
| 1368 | OperationType::MapGroup, false)); | ||
| 1369 | |||
| 1370 | // Apply the memory block update. | ||
| 1371 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, | ||
| 1372 | KMemoryState::Insecure, KMemoryPermission::UserReadWrite, | ||
| 1373 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1374 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1375 | |||
| 1376 | // Update our mapped insecure size. | ||
| 1377 | m_mapped_insecure_memory += size; | ||
| 1378 | |||
| 1379 | // Commit the memory reservation. | ||
| 1380 | memory_reservation.Commit(); | ||
| 1381 | |||
| 1382 | // We succeeded. | ||
| 1383 | R_SUCCEED(); | ||
| 1384 | } | ||
| 1385 | |||
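The KScopedResourceReservation used above implements commit-or-release: its destructor returns the reserved amount to the resource limit unless Commit() was called, so every early R_TRY exit automatically undoes the reservation. A standalone sketch of the idiom, with a plain counter standing in for KResourceLimit:

    #include <cstdio>

    // Commit-or-release reservation against a simple counter "limit".
    class ScopedReservation {
    public:
        ScopedReservation(long& available, long amount)
            : m_available(available), m_amount(amount), m_ok(available >= amount) {
            if (m_ok) {
                m_available -= m_amount;
            }
        }
        ~ScopedReservation() {
            if (m_ok && !m_committed) {
                m_available += m_amount; // auto-release on failure paths
            }
        }
        bool Succeeded() const { return m_ok; }
        void Commit() { m_committed = true; }

    private:
        long& m_available;
        long m_amount;
        bool m_ok;
        bool m_committed = false;
    };

    bool MapSomething(long& limit, bool fail_midway) {
        ScopedReservation reservation(limit, 100);
        if (!reservation.Succeeded()) return false;
        if (fail_midway) return false; // destructor releases the reservation
        reservation.Commit();          // success: the amount stays reserved
        return true;
    }

    int main() {
        long limit = 150;
        const bool first = MapSomething(limit, true);
        std::printf("first=%d limit=%ld\n", first, limit);   // first=0 limit=150
        const bool second = MapSomething(limit, false);
        std::printf("second=%d limit=%ld\n", second, limit); // second=1 limit=50
    }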
| 1386 | Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 1387 | // Lock the table. | ||
| 1388 | KScopedLightLock lk(m_general_lock); | ||
| 1389 | |||
| 1390 | // Check the memory state. | ||
| 1391 | size_t num_allocator_blocks; | ||
| 1392 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1393 | KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All, | ||
| 1394 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 1395 | KMemoryAttribute::None)); | ||
| 1396 | |||
| 1397 | // Create an update allocator. | ||
| 1398 | Result allocator_result; | ||
| 1399 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1400 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1401 | R_TRY(allocator_result); | ||
| 1402 | |||
| 1403 | // We're going to perform an update, so create a helper. | ||
| 1404 | KScopedPageTableUpdater updater(this); | ||
| 1405 | |||
| 1406 | // Unmap the memory. | ||
| 1407 | const size_t num_pages = size / PageSize; | ||
| 1408 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1409 | DisableMergeAttribute::None}; | ||
| 1410 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties, | ||
| 1411 | OperationType::Unmap, false)); | ||
| 1412 | |||
| 1413 | // Apply the memory block update. | ||
| 1414 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 1415 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 1416 | KMemoryBlockDisableMergeAttribute::None, | ||
| 1417 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1418 | |||
| 1419 | // Update our mapped insecure size. | ||
| 1420 | m_mapped_insecure_memory -= size; | ||
| 1421 | |||
| 1422 | // Release the insecure memory from the insecure limit. | ||
| 1423 | if (auto* const insecure_resource_limit = | ||
| 1424 | KSystemControl::GetInsecureMemoryResourceLimit(m_kernel); | ||
| 1425 | insecure_resource_limit != nullptr) { | ||
| 1426 | insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size); | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | R_SUCCEED(); | ||
| 1430 | } | ||
| 1431 | |||
| 1432 | KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 1433 | size_t num_pages, size_t alignment, size_t offset, | ||
| 1434 | size_t guard_pages) const { | ||
| 1435 | KProcessAddress address = 0; | ||
| 1436 | |||
| 1437 | if (num_pages <= region_num_pages) { | ||
| 1438 | if (this->IsAslrEnabled()) { | ||
| 1439 | // Try to directly find a free area up to 8 times. | ||
| 1440 | for (size_t i = 0; i < 8; i++) { | ||
| 1441 | const size_t random_offset = | ||
| 1442 | KSystemControl::GenerateRandomRange( | ||
| 1443 | 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * | ||
| 1444 | alignment; | ||
| 1445 | const KProcessAddress candidate = | ||
| 1446 | Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset; | ||
| 1447 | |||
| 1448 | KMemoryInfo info; | ||
| 1449 | Svc::PageInfo page_info; | ||
| 1450 | R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), | ||
| 1451 | candidate)); | ||
| 1452 | |||
| 1453 | if (info.m_state != KMemoryState::Free) { | ||
| 1454 | continue; | ||
| 1455 | } | ||
| 1456 | if (!(region_start <= candidate)) { | ||
| 1457 | continue; | ||
| 1458 | } | ||
| 1459 | if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { | ||
| 1460 | continue; | ||
| 1461 | } | ||
| 1462 | if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= | ||
| 1463 | info.GetLastAddress())) { | ||
| 1464 | continue; | ||
| 1465 | } | ||
| 1466 | if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= | ||
| 1467 | region_start + region_num_pages * PageSize - 1)) { | ||
| 1468 | continue; | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | address = candidate; | ||
| 1472 | break; | ||
| 1473 | } | ||
| 1474 | // Fall back to finding the first free area with a random offset. | ||
| 1475 | if (address == 0) { | ||
| 1476 | // NOTE: Nintendo does not account for guard pages here. | ||
| 1477 | // This may theoretically cause an offset to be chosen that cannot be mapped. | ||
| 1478 | // We will account for guard pages. | ||
| 1479 | const size_t offset_pages = KSystemControl::GenerateRandomRange( | ||
| 1480 | 0, region_num_pages - num_pages - guard_pages); | ||
| 1481 | address = m_memory_block_manager.FindFreeArea( | ||
| 1482 | region_start + offset_pages * PageSize, region_num_pages - offset_pages, | ||
| 1483 | num_pages, alignment, offset, guard_pages); | ||
| 1484 | } | ||
| 1485 | } | ||
| 1486 | // Find the first free area. | ||
| 1487 | if (address == 0) { | ||
| 1488 | address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, | ||
| 1489 | alignment, offset, guard_pages); | ||
| 1490 | } | ||
| 1491 | } | ||
| 1492 | |||
| 1493 | return address; | ||
| 1494 | } | ||
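The ASLR path of FindFreeArea draws a random, alignment-granular offset and aligns the candidate down before validating it against the region. Here is a small standalone sketch of that candidate arithmetic, with std::mt19937_64 standing in for KSystemControl::GenerateRandomRange and example region parameters chosen arbitrarily.

    #include <cstdint>
    #include <cstdio>
    #include <random>

    constexpr std::uint64_t PageSize = 0x1000;

    std::uint64_t AlignDown(std::uint64_t value, std::uint64_t alignment) {
        return value & ~(alignment - 1);
    }

    int main() {
        const std::uint64_t region_start = 0x10000000;
        const std::uint64_t region_num_pages = 0x100;
        const std::uint64_t num_pages = 0x10;
        const std::uint64_t alignment = 0x2000;
        const std::uint64_t offset = 0;
        const std::uint64_t guard_pages = 1;

        std::mt19937_64 rng{42};
        std::uniform_int_distribution<std::uint64_t> dist(
            0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment);

        // Random offset in units of the alignment, then align the candidate down.
        const std::uint64_t random_offset = dist(rng) * alignment;
        const std::uint64_t candidate =
            AlignDown(region_start + random_offset, alignment) + offset;
        std::printf("candidate: %#llx\n", static_cast<unsigned long long>(candidate));
    }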
| 1495 | |||
| 1496 | size_t KPageTableBase::GetSize(KMemoryState state) const { | ||
| 1497 | // Lock the table. | ||
| 1498 | KScopedLightLock lk(m_general_lock); | ||
| 1499 | |||
| 1500 | // Iterate, counting blocks with the desired state. | ||
| 1501 | size_t total_size = 0; | ||
| 1502 | for (KMemoryBlockManager::const_iterator it = | ||
| 1503 | m_memory_block_manager.FindIterator(m_address_space_start); | ||
| 1504 | it != m_memory_block_manager.end(); ++it) { | ||
| 1505 | // Get the memory info. | ||
| 1506 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1507 | if (info.GetState() == state) { | ||
| 1508 | total_size += info.GetSize(); | ||
| 1509 | } | ||
| 1510 | } | ||
| 1511 | |||
| 1512 | return total_size; | ||
| 1513 | } | ||
| 1514 | |||
| 1515 | size_t KPageTableBase::GetCodeSize() const { | ||
| 1516 | return this->GetSize(KMemoryState::Code); | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | size_t KPageTableBase::GetCodeDataSize() const { | ||
| 1520 | return this->GetSize(KMemoryState::CodeData); | ||
| 1521 | } | ||
| 1522 | |||
| 1523 | size_t KPageTableBase::GetAliasCodeSize() const { | ||
| 1524 | return this->GetSize(KMemoryState::AliasCode); | ||
| 1525 | } | ||
| 1526 | |||
| 1527 | size_t KPageTableBase::GetAliasCodeDataSize() const { | ||
| 1528 | return this->GetSize(KMemoryState::AliasCodeData); | ||
| 1529 | } | ||
| 1530 | |||
| 1531 | Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 1532 | size_t num_pages, KMemoryPermission perm) { | ||
| 1533 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1534 | |||
| 1535 | // Create a page group to hold the pages we allocate. | ||
| 1536 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1537 | |||
| 1538 | // Allocate the pages. | ||
| 1539 | R_TRY( | ||
| 1540 | m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option)); | ||
| 1541 | |||
| 1542 | // Ensure that the page group is closed when we're done working with it. | ||
| 1543 | SCOPE_EXIT({ pg.Close(); }); | ||
| 1544 | |||
| 1545 | // Clear all pages. | ||
| 1546 | for (const auto& it : pg) { | ||
| 1547 | std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), | ||
| 1548 | static_cast<u32>(m_heap_fill_value), it.GetSize()); | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | // Map the pages. | ||
| 1552 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None}; | ||
| 1553 | R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup, | ||
| 1554 | false)); | ||
| 1555 | } | ||
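The SCOPE_EXIT({ pg.Close(); }) lines above guarantee the page group's extra references are dropped on every exit path, success or failure alike. A minimal scope-exit guard in the same spirit (a hypothetical stand-in, not the kernel's actual macro):

    #include <cstdio>
    #include <utility>

    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F f) : m_f(std::move(f)) {}
        ~ScopeExit() { m_f(); } // runs on every exit path
        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;
    private:
        F m_f;
    };

    int main() {
        std::printf("open page group\n");
        ScopeExit guard([] { std::printf("close page group\n"); });
        std::printf("clear and map pages\n");
        // "close page group" prints last, even if an early return were taken.
    }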
| 1556 | |||
| 1557 | Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 1558 | const KPageGroup& pg, const KPageProperties properties, | ||
| 1559 | bool reuse_ll) { | ||
| 1560 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1561 | |||
| 1562 | // Note the current address, so that we can iterate. | ||
| 1563 | const KProcessAddress start_address = address; | ||
| 1564 | KProcessAddress cur_address = address; | ||
| 1565 | |||
| 1566 | // Ensure that we clean up on failure. | ||
| 1567 | ON_RESULT_FAILURE { | ||
| 1568 | ASSERT(!reuse_ll); | ||
| 1569 | if (cur_address != start_address) { | ||
| 1570 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 1571 | DisableMergeAttribute::None}; | ||
| 1572 | R_ASSERT(this->Operate(page_list, start_address, | ||
| 1573 | (cur_address - start_address) / PageSize, 0, false, | ||
| 1574 | unmap_properties, OperationType::Unmap, true)); | ||
| 1575 | } | ||
| 1576 | }; | ||
| 1577 | |||
| 1578 | // Iterate, mapping all pages in the group. | ||
| 1579 | for (const auto& block : pg) { | ||
| 1580 | // Map and advance. | ||
| 1581 | const KPageProperties cur_properties = | ||
| 1582 | (cur_address == start_address) | ||
| 1583 | ? properties | ||
| 1584 | : KPageProperties{properties.perm, properties.io, properties.uncached, | ||
| 1585 | DisableMergeAttribute::None}; | ||
| 1586 | R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true, | ||
| 1587 | cur_properties, OperationType::Map, reuse_ll)); | ||
| 1588 | cur_address += block.GetSize(); | ||
| 1589 | } | ||
| 1590 | |||
| 1591 | // We succeeded! | ||
| 1592 | R_SUCCEED(); | ||
| 1593 | } | ||
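ON_RESULT_FAILURE above unwinds exactly the range mapped so far, [start_address, cur_address), so a mid-group failure never leaks partial mappings. The toy sketch below shows the same rollback shape; MapBlock and the point of failure are contrived for illustration.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Pretend mapping fails once we reach virtual address 0x5000.
    bool MapBlock(std::uint64_t va, std::uint64_t num_pages) {
        return va < 0x5000;
    }

    int main() {
        const std::vector<std::uint64_t> block_pages = {2, 2, 4};
        const std::uint64_t page_size = 0x1000;
        const std::uint64_t start_address = 0x1000;
        std::uint64_t cur_address = start_address;
        for (std::uint64_t pages : block_pages) {
            if (!MapBlock(cur_address, pages)) {
                // Roll back only what was actually mapped so far.
                std::printf("unmap [%#llx, %#llx)\n",
                            static_cast<unsigned long long>(start_address),
                            static_cast<unsigned long long>(cur_address));
                return 1;
            }
            cur_address += pages * page_size;
        }
        std::printf("mapped [%#llx, %#llx)\n",
                    static_cast<unsigned long long>(start_address),
                    static_cast<unsigned long long>(cur_address));
        return 0;
    }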
| 1594 | |||
| 1595 | void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 1596 | const KPageGroup& pg) { | ||
| 1597 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1598 | |||
| 1599 | // Note the current address, so that we can iterate. | ||
| 1600 | const KProcessAddress start_address = address; | ||
| 1601 | const KProcessAddress last_address = start_address + size - 1; | ||
| 1602 | const KProcessAddress end_address = last_address + 1; | ||
| 1603 | |||
| 1604 | // Iterate over the memory. | ||
| 1605 | auto pg_it = pg.begin(); | ||
| 1606 | ASSERT(pg_it != pg.end()); | ||
| 1607 | |||
| 1608 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 1609 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1610 | |||
| 1611 | auto it = m_memory_block_manager.FindIterator(start_address); | ||
| 1612 | while (true) { | ||
| 1613 | // Check that the iterator is valid. | ||
| 1614 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1615 | |||
| 1616 | // Get the memory info. | ||
| 1617 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1618 | |||
| 1619 | // Determine the range to map. | ||
| 1620 | KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address)); | ||
| 1621 | const KProcessAddress map_end_address = | ||
| 1622 | std::min(info.GetEndAddress(), GetInteger(end_address)); | ||
| 1623 | ASSERT(map_end_address != map_address); | ||
| 1624 | |||
| 1625 | // Determine if we should disable head merge. | ||
| 1626 | const bool disable_head_merge = | ||
| 1627 | info.GetAddress() >= GetInteger(start_address) && | ||
| 1628 | True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1629 | const KPageProperties map_properties = { | ||
| 1630 | info.GetPermission(), false, false, | ||
| 1631 | disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None}; | ||
| 1632 | |||
| 1633 | // While we have pages to map, map them. | ||
| 1634 | size_t map_pages = (map_end_address - map_address) / PageSize; | ||
| 1635 | while (map_pages > 0) { | ||
| 1636 | // Check if we're at the end of the physical block. | ||
| 1637 | if (pg_pages == 0) { | ||
| 1638 | // Ensure there are more pages to map. | ||
| 1639 | ASSERT(pg_it != pg.end()); | ||
| 1640 | |||
| 1641 | // Advance our physical block. | ||
| 1642 | ++pg_it; | ||
| 1643 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1644 | pg_pages = pg_it->GetNumPages(); | ||
| 1645 | } | ||
| 1646 | |||
| 1647 | // Map whatever we can. | ||
| 1648 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 1649 | R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true, | ||
| 1650 | map_properties, OperationType::Map, true)); | ||
| 1651 | |||
| 1652 | // Advance. | ||
| 1653 | map_address += cur_pages * PageSize; | ||
| 1654 | map_pages -= cur_pages; | ||
| 1655 | |||
| 1656 | pg_phys_addr += cur_pages * PageSize; | ||
| 1657 | pg_pages -= cur_pages; | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | // Check if we're done. | ||
| 1661 | if (last_address <= info.GetLastAddress()) { | ||
| 1662 | break; | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | // Advance. | ||
| 1666 | ++it; | ||
| 1667 | } | ||
| 1668 | |||
| 1669 | // Check that we re-mapped precisely the page group. | ||
| 1670 | ASSERT((++pg_it) == pg.end()); | ||
| 1671 | } | ||
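RemapPageGroup is a two-cursor walk: one cursor over the virtual memory blocks, one over the physical extents of the page group, each step advancing both by the smaller remainder. The pattern in isolation, with plain vectors standing in for both structures:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<int> virt_pages = {3, 5}; // virtual block sizes, in pages
        const std::vector<int> phys_pages = {4, 4}; // physical extent sizes, in pages

        std::size_t vi = 0, pi = 0;
        int v_left = virt_pages[0], p_left = phys_pages[0];
        while (true) {
            // Map as much as both cursors allow, then refill whichever ran out.
            const int step = std::min(v_left, p_left);
            std::printf("map %d pages (virt block %zu <- phys extent %zu)\n", step, vi, pi);
            v_left -= step;
            p_left -= step;
            if (v_left == 0) {
                if (++vi == virt_pages.size()) break;
                v_left = virt_pages[vi];
            }
            if (p_left == 0) {
                if (++pi == phys_pages.size()) break;
                p_left = phys_pages[pi];
            }
        }
        return 0;
    }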
| 1672 | |||
| 1673 | Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) { | ||
| 1674 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1675 | |||
| 1676 | const size_t size = num_pages * PageSize; | ||
| 1677 | |||
| 1678 | // We're making a new group, not adding to an existing one. | ||
| 1679 | R_UNLESS(pg.empty(), ResultInvalidCurrentMemory); | ||
| 1680 | |||
| 1681 | auto& impl = this->GetImpl(); | ||
| 1682 | |||
| 1683 | // Begin traversal. | ||
| 1684 | TraversalContext context; | ||
| 1685 | TraversalEntry next_entry; | ||
| 1686 | R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr), | ||
| 1687 | ResultInvalidCurrentMemory); | ||
| 1688 | |||
| 1689 | // Prepare tracking variables. | ||
| 1690 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 1691 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 1692 | size_t tot_size = cur_size; | ||
| 1693 | |||
| 1694 | // Iterate, adding to group as we go. | ||
| 1695 | while (tot_size < size) { | ||
| 1696 | R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)), | ||
| 1697 | ResultInvalidCurrentMemory); | ||
| 1698 | |||
| 1699 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 1700 | const size_t cur_pages = cur_size / PageSize; | ||
| 1701 | |||
| 1702 | R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 1703 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 1704 | |||
| 1705 | cur_addr = next_entry.phys_addr; | ||
| 1706 | cur_size = next_entry.block_size; | ||
| 1707 | } else { | ||
| 1708 | cur_size += next_entry.block_size; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | tot_size += next_entry.block_size; | ||
| 1712 | } | ||
| 1713 | |||
| 1714 | // Ensure we add the right amount for the last block. | ||
| 1715 | if (tot_size > size) { | ||
| 1716 | cur_size -= (tot_size - size); | ||
| 1717 | } | ||
| 1718 | |||
| 1719 | // Add the last block. | ||
| 1720 | const size_t cur_pages = cur_size / PageSize; | ||
| 1721 | R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 1722 | R_TRY(pg.AddBlock(cur_addr, cur_pages)); | ||
| 1723 | |||
| 1724 | R_SUCCEED(); | ||
| 1725 | } | ||
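MakePageGroup coalesces traversal entries whenever the next entry's physical address continues the current run, and flushes a block each time contiguity breaks. The same accumulate-or-flush loop over simplified stand-in entries:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Entry {
        std::uint64_t phys_addr;
        std::uint64_t size;
    };

    int main() {
        // Three entries; the first two are physically contiguous.
        const std::vector<Entry> entries = {
            {0x80000000, 0x1000}, {0x80001000, 0x1000}, {0x90000000, 0x2000}};
        std::uint64_t cur_addr = entries[0].phys_addr;
        std::uint64_t cur_size = entries[0].size;
        for (std::size_t i = 1; i < entries.size(); ++i) {
            if (entries[i].phys_addr != cur_addr + cur_size) {
                // Contiguity broke: flush the accumulated block, start a new one.
                std::printf("block: %#llx +%#llx\n",
                            static_cast<unsigned long long>(cur_addr),
                            static_cast<unsigned long long>(cur_size));
                cur_addr = entries[i].phys_addr;
                cur_size = entries[i].size;
            } else {
                cur_size += entries[i].size;
            }
        }
        // The last block is flushed after the loop.
        std::printf("block: %#llx +%#llx\n", static_cast<unsigned long long>(cur_addr),
                    static_cast<unsigned long long>(cur_size));
    }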
| 1726 | |||
| 1727 | bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, | ||
| 1728 | size_t num_pages) { | ||
| 1729 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1730 | |||
| 1731 | const size_t size = num_pages * PageSize; | ||
| 1732 | |||
| 1733 | // Empty groups are necessarily invalid. | ||
| 1734 | if (pg.empty()) { | ||
| 1735 | return false; | ||
| 1736 | } | ||
| 1737 | |||
| 1738 | auto& impl = this->GetImpl(); | ||
| 1739 | |||
| 1740 | // We're going to validate that the group we'd expect is the group we see. | ||
| 1741 | auto cur_it = pg.begin(); | ||
| 1742 | KPhysicalAddress cur_block_address = cur_it->GetAddress(); | ||
| 1743 | size_t cur_block_pages = cur_it->GetNumPages(); | ||
| 1744 | |||
| 1745 | auto UpdateCurrentIterator = [&]() { | ||
| 1746 | if (cur_block_pages == 0) { | ||
| 1747 | if ((++cur_it) == pg.end()) { | ||
| 1748 | return false; | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | cur_block_address = cur_it->GetAddress(); | ||
| 1752 | cur_block_pages = cur_it->GetNumPages(); | ||
| 1753 | } | ||
| 1754 | return true; | ||
| 1755 | }; | ||
| 1756 | |||
| 1757 | // Begin traversal. | ||
| 1758 | TraversalContext context; | ||
| 1759 | TraversalEntry next_entry; | ||
| 1760 | if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) { | ||
| 1761 | return false; | ||
| 1762 | } | ||
| 1763 | |||
| 1764 | // Prepare tracking variables. | ||
| 1765 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 1766 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 1767 | size_t tot_size = cur_size; | ||
| 1768 | |||
| 1769 | // Iterate, comparing expected to actual. | ||
| 1770 | while (tot_size < size) { | ||
| 1771 | if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) { | ||
| 1772 | return false; | ||
| 1773 | } | ||
| 1774 | |||
| 1775 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 1776 | const size_t cur_pages = cur_size / PageSize; | ||
| 1777 | |||
| 1778 | if (!IsHeapPhysicalAddress(cur_addr)) { | ||
| 1779 | return false; | ||
| 1780 | } | ||
| 1781 | |||
| 1782 | if (!UpdateCurrentIterator()) { | ||
| 1783 | return false; | ||
| 1784 | } | ||
| 1785 | |||
| 1786 | if (cur_block_address != cur_addr || cur_block_pages < cur_pages) { | ||
| 1787 | return false; | ||
| 1788 | } | ||
| 1789 | |||
| 1790 | cur_block_address += cur_size; | ||
| 1791 | cur_block_pages -= cur_pages; | ||
| 1792 | cur_addr = next_entry.phys_addr; | ||
| 1793 | cur_size = next_entry.block_size; | ||
| 1794 | } else { | ||
| 1795 | cur_size += next_entry.block_size; | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | tot_size += next_entry.block_size; | ||
| 1799 | } | ||
| 1800 | |||
| 1801 | // Ensure we compare the right amount for the last block. | ||
| 1802 | if (tot_size > size) { | ||
| 1803 | cur_size -= (tot_size - size); | ||
| 1804 | } | ||
| 1805 | |||
| 1806 | if (!IsHeapPhysicalAddress(cur_addr)) { | ||
| 1807 | return false; | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | if (!UpdateCurrentIterator()) { | ||
| 1811 | return false; | ||
| 1812 | } | ||
| 1813 | |||
| 1814 | return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); | ||
| 1815 | } | ||
| 1816 | |||
| 1817 | Result KPageTableBase::GetContiguousMemoryRangeWithState( | ||
| 1818 | MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask, | ||
| 1819 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 1820 | KMemoryAttribute attr_mask, KMemoryAttribute attr) { | ||
| 1821 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1822 | |||
| 1823 | auto& impl = this->GetImpl(); | ||
| 1824 | |||
| 1825 | // Begin a traversal. | ||
| 1826 | TraversalContext context; | ||
| 1827 | TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 1828 | R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), | ||
| 1829 | ResultInvalidCurrentMemory); | ||
| 1830 | |||
| 1831 | // Traverse until we have enough size or we aren't contiguous any more. | ||
| 1832 | const KPhysicalAddress phys_address = cur_entry.phys_addr; | ||
| 1833 | size_t contig_size; | ||
| 1834 | for (contig_size = | ||
| 1835 | cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); | ||
| 1836 | contig_size < size; contig_size += cur_entry.block_size) { | ||
| 1837 | if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) { | ||
| 1838 | break; | ||
| 1839 | } | ||
| 1840 | if (cur_entry.phys_addr != phys_address + contig_size) { | ||
| 1841 | break; | ||
| 1842 | } | ||
| 1843 | } | ||
| 1844 | |||
| 1845 | // Take the minimum size for our region. | ||
| 1846 | size = std::min(size, contig_size); | ||
| 1847 | |||
| 1848 | // Check that the memory is contiguous (modulo the reference count bit). | ||
| 1849 | const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted; | ||
| 1850 | const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 1851 | address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 1852 | attr_mask, attr)); | ||
| 1853 | if (!is_heap) { | ||
| 1854 | R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask, | ||
| 1855 | perm, attr_mask, attr)); | ||
| 1856 | } | ||
| 1857 | |||
| 1858 | // The memory is contiguous, so set the output range. | ||
| 1859 | out->Set(phys_address, size, is_heap); | ||
| 1860 | R_SUCCEED(); | ||
| 1861 | } | ||
| 1862 | |||
| 1863 | Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, | ||
| 1864 | Svc::MemoryPermission svc_perm) { | ||
| 1865 | const size_t num_pages = size / PageSize; | ||
| 1866 | |||
| 1867 | // Lock the table. | ||
| 1868 | KScopedLightLock lk(m_general_lock); | ||
| 1869 | |||
| 1870 | // Verify we can change the memory permission. | ||
| 1871 | KMemoryState old_state; | ||
| 1872 | KMemoryPermission old_perm; | ||
| 1873 | size_t num_allocator_blocks; | ||
| 1874 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 1875 | std::addressof(num_allocator_blocks), addr, size, | ||
| 1876 | KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, | ||
| 1877 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1878 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1879 | |||
| 1880 | // Determine new perm. | ||
| 1881 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 1882 | R_SUCCEED_IF(old_perm == new_perm); | ||
| 1883 | |||
| 1884 | // Create an update allocator. | ||
| 1885 | Result allocator_result; | ||
| 1886 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1887 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1888 | R_TRY(allocator_result); | ||
| 1889 | |||
| 1890 | // We're going to perform an update, so create a helper. | ||
| 1891 | KScopedPageTableUpdater updater(this); | ||
| 1892 | |||
| 1893 | // Perform mapping operation. | ||
| 1894 | const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None}; | ||
| 1895 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 1896 | OperationType::ChangePermissions, false)); | ||
| 1897 | |||
| 1898 | // Update the blocks. | ||
| 1899 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, | ||
| 1900 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1901 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1902 | |||
| 1903 | R_SUCCEED(); | ||
| 1904 | } | ||
| 1905 | |||
| 1906 | Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 1907 | Svc::MemoryPermission svc_perm) { | ||
| 1908 | const size_t num_pages = size / PageSize; | ||
| 1909 | |||
| 1910 | // Lock the table. | ||
| 1911 | KScopedLightLock lk(m_general_lock); | ||
| 1912 | |||
| 1913 | // Verify we can change the memory permission. | ||
| 1914 | KMemoryState old_state; | ||
| 1915 | KMemoryPermission old_perm; | ||
| 1916 | size_t num_allocator_blocks; | ||
| 1917 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, | ||
| 1918 | std::addressof(num_allocator_blocks), addr, size, | ||
| 1919 | KMemoryState::FlagCode, KMemoryState::FlagCode, | ||
| 1920 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1921 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 1922 | |||
| 1923 | // Make a new page group for the region. | ||
| 1924 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 1925 | |||
| 1926 | // Determine new perm/state. | ||
| 1927 | const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); | ||
| 1928 | KMemoryState new_state = old_state; | ||
| 1929 | const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite; | ||
| 1930 | const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 1931 | const bool was_x = | ||
| 1932 | (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute; | ||
| 1933 | ASSERT(!(is_w && is_x)); | ||
| 1934 | |||
| 1935 | if (is_w) { | ||
| 1936 | switch (old_state) { | ||
| 1937 | case KMemoryState::Code: | ||
| 1938 | new_state = KMemoryState::CodeData; | ||
| 1939 | break; | ||
| 1940 | case KMemoryState::AliasCode: | ||
| 1941 | new_state = KMemoryState::AliasCodeData; | ||
| 1942 | break; | ||
| 1943 | default: | ||
| 1944 | UNREACHABLE(); | ||
| 1945 | } | ||
| 1946 | } | ||
| 1947 | |||
| 1948 | // Create a page group, if we're setting execute permissions. | ||
| 1949 | if (is_x) { | ||
| 1950 | R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages)); | ||
| 1951 | } | ||
| 1952 | |||
| 1953 | // Succeed if there's nothing to do. | ||
| 1954 | R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); | ||
| 1955 | |||
| 1956 | // Create an update allocator. | ||
| 1957 | Result allocator_result; | ||
| 1958 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1959 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1960 | R_TRY(allocator_result); | ||
| 1961 | |||
| 1962 | // We're going to perform an update, so create a helper. | ||
| 1963 | KScopedPageTableUpdater updater(this); | ||
| 1964 | |||
| 1965 | // Perform mapping operation. | ||
| 1966 | const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None}; | ||
| 1967 | const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush | ||
| 1968 | : OperationType::ChangePermissions; | ||
| 1969 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation, | ||
| 1970 | false)); | ||
| 1971 | |||
| 1972 | // Update the blocks. | ||
| 1973 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, | ||
| 1974 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1975 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1976 | |||
| 1977 | // Ensure cache coherency, if we're setting pages as executable. | ||
| 1978 | if (is_x) { | ||
| 1979 | for (const auto& block : pg) { | ||
| 1980 | StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize()); | ||
| 1981 | } | ||
| 1982 | InvalidateInstructionCache(m_system, addr, size); | ||
| 1983 | } | ||
| 1984 | |||
| 1985 | R_SUCCEED(); | ||
| 1986 | } | ||
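The state switch above is the write half of the W^X policy: granting write permission to code memory demotes it to a data-carrying state (Code becomes CodeData, AliasCode becomes AliasCodeData), and any other starting state is unreachable. A compact sketch with illustrative enum values:

    #include <cassert>
    #include <cstdio>

    enum class State { Code, CodeData, AliasCode, AliasCodeData };

    State PromoteForWrite(State old_state) {
        switch (old_state) {
        case State::Code:
            return State::CodeData;
        case State::AliasCode:
            return State::AliasCodeData;
        default:
            assert(false && "unreachable: only code states can gain write");
            return old_state;
        }
    }

    int main() {
        assert(PromoteForWrite(State::Code) == State::CodeData);
        assert(PromoteForWrite(State::AliasCode) == State::AliasCodeData);
        std::printf("ok\n");
    }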
| 1987 | |||
| 1988 | Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, | ||
| 1989 | KMemoryAttribute attr) { | ||
| 1990 | const size_t num_pages = size / PageSize; | ||
| 1991 | ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask); | ||
| 1992 | |||
| 1993 | // Lock the table. | ||
| 1994 | KScopedLightLock lk(m_general_lock); | ||
| 1995 | |||
| 1996 | // Verify we can change the memory attribute. | ||
| 1997 | KMemoryState old_state; | ||
| 1998 | KMemoryPermission old_perm; | ||
| 1999 | KMemoryAttribute old_attr; | ||
| 2000 | size_t num_allocator_blocks; | ||
| 2001 | constexpr KMemoryAttribute AttributeTestMask = | ||
| 2002 | ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared); | ||
| 2003 | const KMemoryState state_test_mask = | ||
| 2004 | (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute | ||
| 2005 | : KMemoryState::None) | | ||
| 2006 | (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock | ||
| 2007 | : KMemoryState::None); | ||
| 2008 | R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), | ||
| 2009 | std::addressof(old_attr), std::addressof(num_allocator_blocks), | ||
| 2010 | addr, size, state_test_mask, state_test_mask, | ||
| 2011 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2012 | AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); | ||
| 2013 | |||
| 2014 | // Create an update allocator. | ||
| 2015 | Result allocator_result; | ||
| 2016 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2017 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2018 | R_TRY(allocator_result); | ||
| 2019 | |||
| 2020 | // We're going to perform an update, so create a helper. | ||
| 2021 | KScopedPageTableUpdater updater(this); | ||
| 2022 | |||
| 2023 | // If we need to, perform a change attribute operation. | ||
| 2024 | if (True(mask & KMemoryAttribute::Uncached)) { | ||
| 2025 | // Determine the new attribute. | ||
| 2026 | const KMemoryAttribute new_attr = | ||
| 2027 | static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask))); | ||
| 2028 | |||
| 2029 | // Perform operation. | ||
| 2030 | const KPageProperties properties = {old_perm, false, | ||
| 2031 | True(new_attr & KMemoryAttribute::Uncached), | ||
| 2032 | DisableMergeAttribute::None}; | ||
| 2033 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, | ||
| 2034 | OperationType::ChangePermissionsAndRefreshAndFlush, false)); | ||
| 2035 | } | ||
| 2036 | |||
| 2037 | // Update the blocks. | ||
| 2038 | m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr); | ||
| 2039 | |||
| 2040 | R_SUCCEED(); | ||
| 2041 | } | ||
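The new attribute computed above is a masked merge, new = (old & ~mask) | (attr & mask): bits inside the mask are replaced, bits outside are preserved. Demonstrated on small constants:

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint32_t old_attr = 0b0101;
        const std::uint32_t mask = 0b0011;
        const std::uint32_t attr = 0b0010;
        const std::uint32_t new_attr = (old_attr & ~mask) | (attr & mask);
        assert(new_attr == 0b0110); // high bit preserved, masked bits replaced
        return 0;
    }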
| 2042 | |||
| 2043 | Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) { | ||
| 2044 | // Lock the physical memory mutex. | ||
| 2045 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | ||
| 2046 | |||
| 2047 | // Try to perform a reduction in heap, instead of an extension. | ||
| 2048 | KProcessAddress cur_address; | ||
| 2049 | size_t allocation_size; | ||
| 2050 | { | ||
| 2051 | // Lock the table. | ||
| 2052 | KScopedLightLock lk(m_general_lock); | ||
| 2053 | |||
| 2054 | // Validate that setting heap size is possible at all. | ||
| 2055 | R_UNLESS(!m_is_kernel, ResultOutOfMemory); | ||
| 2056 | R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), | ||
| 2057 | ResultOutOfMemory); | ||
| 2058 | R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); | ||
| 2059 | |||
| 2060 | if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) { | ||
| 2061 | // The size being requested is less than the current size, so we need to free the end of | ||
| 2062 | // the heap. | ||
| 2063 | |||
| 2064 | // Validate memory state. | ||
| 2065 | size_t num_allocator_blocks; | ||
| 2066 | R_TRY(this->CheckMemoryState( | ||
| 2067 | std::addressof(num_allocator_blocks), m_heap_region_start + size, | ||
| 2068 | (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All, | ||
| 2069 | KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 2070 | KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 2071 | |||
| 2072 | // Create an update allocator. | ||
| 2073 | Result allocator_result; | ||
| 2074 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2075 | m_memory_block_slab_manager, | ||
| 2076 | num_allocator_blocks); | ||
| 2077 | R_TRY(allocator_result); | ||
| 2078 | |||
| 2079 | // We're going to perform an update, so create a helper. | ||
| 2080 | KScopedPageTableUpdater updater(this); | ||
| 2081 | |||
| 2082 | // Unmap the end of the heap. | ||
| 2083 | const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize; | ||
| 2084 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2085 | DisableMergeAttribute::None}; | ||
| 2086 | R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0, | ||
| 2087 | false, unmap_properties, OperationType::Unmap, false)); | ||
| 2088 | |||
| 2089 | // Release the memory from the resource limit. | ||
| 2090 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 2091 | num_pages * PageSize); | ||
| 2092 | |||
| 2093 | // Apply the memory block update. | ||
| 2094 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | ||
| 2095 | num_pages, KMemoryState::Free, KMemoryPermission::None, | ||
| 2096 | KMemoryAttribute::None, | ||
| 2097 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2098 | size == 0 ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2099 | : KMemoryBlockDisableMergeAttribute::None); | ||
| 2100 | |||
| 2101 | // Update the current heap end. | ||
| 2102 | m_current_heap_end = m_heap_region_start + size; | ||
| 2103 | |||
| 2104 | // Set the output. | ||
| 2105 | *out = m_heap_region_start; | ||
| 2106 | R_SUCCEED(); | ||
| 2107 | } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) { | ||
| 2108 | // The size requested is exactly the current size. | ||
| 2109 | *out = m_heap_region_start; | ||
| 2110 | R_SUCCEED(); | ||
| 2111 | } else { | ||
| 2112 | // We have to allocate memory. Determine how much to allocate and where while the table | ||
| 2113 | // is locked. | ||
| 2114 | cur_address = m_current_heap_end; | ||
| 2115 | allocation_size = size - (m_current_heap_end - m_heap_region_start); | ||
| 2116 | } | ||
| 2117 | } | ||
| 2118 | |||
| 2119 | // Reserve memory for the heap extension. | ||
| 2120 | KScopedResourceReservation memory_reservation( | ||
| 2121 | m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size); | ||
| 2122 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 2123 | |||
| 2124 | // Allocate pages for the heap extension. | ||
| 2125 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 2126 | R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, | ||
| 2127 | m_allocate_option)); | ||
| 2128 | |||
| 2129 | // Close the opened pages when we're done with them. | ||
| 2130 | // If the mapping succeeds, each page will gain an extra reference; otherwise, they will | ||
| 2131 | // be freed automatically. | ||
| 2132 | SCOPE_EXIT({ pg.Close(); }); | ||
| 2133 | |||
| 2134 | // Clear all the newly allocated pages. | ||
| 2135 | for (const auto& it : pg) { | ||
| 2136 | std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value, | ||
| 2137 | it.GetSize()); | ||
| 2138 | } | ||
| 2139 | |||
| 2140 | // Map the pages. | ||
| 2141 | { | ||
| 2142 | // Lock the table. | ||
| 2143 | KScopedLightLock lk(m_general_lock); | ||
| 2144 | |||
| 2145 | // Ensure that the heap hasn't changed since we began executing. | ||
| 2146 | ASSERT(cur_address == m_current_heap_end); | ||
| 2147 | |||
| 2148 | // Check the memory state. | ||
| 2149 | size_t num_allocator_blocks; | ||
| 2150 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, | ||
| 2151 | allocation_size, KMemoryState::All, KMemoryState::Free, | ||
| 2152 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2153 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2154 | |||
| 2155 | // Create an update allocator. | ||
| 2156 | Result allocator_result; | ||
| 2157 | KMemoryBlockManagerUpdateAllocator allocator( | ||
| 2158 | std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2159 | R_TRY(allocator_result); | ||
| 2160 | |||
| 2161 | // We're going to perform an update, so create a helper. | ||
| 2162 | KScopedPageTableUpdater updater(this); | ||
| 2163 | |||
| 2164 | // Map the pages. | ||
| 2165 | const size_t num_pages = allocation_size / PageSize; | ||
| 2166 | const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false, | ||
| 2167 | (m_current_heap_end == m_heap_region_start) | ||
| 2168 | ? DisableMergeAttribute::DisableHead | ||
| 2169 | : DisableMergeAttribute::None}; | ||
| 2170 | R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg, | ||
| 2171 | map_properties, OperationType::MapGroup, false)); | ||
| 2172 | |||
| 2173 | // We succeeded, so commit our memory reservation. | ||
| 2174 | memory_reservation.Commit(); | ||
| 2175 | |||
| 2176 | // Apply the memory block update. | ||
| 2177 | m_memory_block_manager.Update( | ||
| 2178 | std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, | ||
| 2179 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 2180 | m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 2181 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 2182 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2183 | |||
| 2184 | // Update the current heap end. | ||
| 2185 | m_current_heap_end = m_heap_region_start + size; | ||
| 2186 | |||
| 2187 | // Set the output. | ||
| 2188 | *out = m_heap_region_start; | ||
| 2189 | R_SUCCEED(); | ||
| 2190 | } | ||
| 2191 | } | ||
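SetHeapSize splits into three cases under the table lock: a requested size below the current end frees the tail, an exact match is a no-op, and anything larger falls through to the reserve/allocate/map path. The decision arithmetic on its own (byte sizes; names are illustrative):

    #include <cstddef>
    #include <cstdio>

    int main() {
        const std::size_t heap_region_start = 0x0;
        const std::size_t current_heap_end = 0x6000; // current heap is 0x6000 bytes
        for (std::size_t requested : {0x4000, 0x6000, 0x9000}) {
            const std::size_t current = current_heap_end - heap_region_start;
            if (requested < current) {
                std::printf("shrink: unmap %zx bytes at the end\n", current - requested);
            } else if (requested == current) {
                std::printf("no-op: heap is already %zx bytes\n", current);
            } else {
                std::printf("extend: allocate %zx bytes at %zx\n", requested - current,
                            current_heap_end);
            }
        }
    }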
| 2192 | |||
| 2193 | Result KPageTableBase::SetMaxHeapSize(size_t size) { | ||
| 2194 | // Lock the table. | ||
| 2195 | KScopedLightLock lk(m_general_lock); | ||
| 2196 | |||
| 2197 | // Only process page tables are allowed to set heap size. | ||
| 2198 | ASSERT(!this->IsKernel()); | ||
| 2199 | |||
| 2200 | m_max_heap_size = size; | ||
| 2201 | |||
| 2202 | R_SUCCEED(); | ||
| 2203 | } | ||
| 2204 | |||
| 2205 | Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, | ||
| 2206 | KProcessAddress addr) const { | ||
| 2207 | // If the address is invalid, create a fake block. | ||
| 2208 | if (!this->Contains(addr, 1)) { | ||
| 2209 | *out_info = { | ||
| 2210 | .m_address = GetInteger(m_address_space_end), | ||
| 2211 | .m_size = 0 - GetInteger(m_address_space_end), | ||
| 2212 | .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), | ||
| 2213 | .m_device_disable_merge_left_count = 0, | ||
| 2214 | .m_device_disable_merge_right_count = 0, | ||
| 2215 | .m_ipc_lock_count = 0, | ||
| 2216 | .m_device_use_count = 0, | ||
| 2217 | .m_ipc_disable_merge_count = 0, | ||
| 2218 | .m_permission = KMemoryPermission::None, | ||
| 2219 | .m_attribute = KMemoryAttribute::None, | ||
| 2220 | .m_original_permission = KMemoryPermission::None, | ||
| 2221 | .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None, | ||
| 2222 | }; | ||
| 2223 | out_page_info->flags = 0; | ||
| 2224 | |||
| 2225 | R_SUCCEED(); | ||
| 2226 | } | ||
| 2227 | |||
| 2228 | // Otherwise, lock the table and query. | ||
| 2229 | KScopedLightLock lk(m_general_lock); | ||
| 2230 | R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr)); | ||
| 2231 | } | ||
| 2232 | |||
| 2233 | Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, | ||
| 2234 | KProcessAddress address) const { | ||
| 2235 | // Lock the table. | ||
| 2236 | KScopedLightLock lk(m_general_lock); | ||
| 2237 | |||
| 2238 | // Align the address down to page size. | ||
| 2239 | address = Common::AlignDown(GetInteger(address), PageSize); | ||
| 2240 | |||
| 2241 | // Verify that we can query the address. | ||
| 2242 | KMemoryInfo info; | ||
| 2243 | Svc::PageInfo page_info; | ||
| 2244 | R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address)); | ||
| 2245 | |||
| 2246 | // Check the memory state. | ||
| 2247 | R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical, | ||
| 2248 | KMemoryState::FlagCanQueryPhysical, | ||
| 2249 | KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead, | ||
| 2250 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2251 | |||
| 2252 | // Prepare to traverse. | ||
| 2253 | KPhysicalAddress phys_addr; | ||
| 2254 | size_t phys_size; | ||
| 2255 | |||
| 2256 | KProcessAddress virt_addr = info.GetAddress(); | ||
| 2257 | KProcessAddress end_addr = info.GetEndAddress(); | ||
| 2258 | |||
| 2259 | // Perform traversal. | ||
| 2260 | { | ||
| 2261 | // Begin traversal. | ||
| 2262 | TraversalContext context; | ||
| 2263 | TraversalEntry next_entry; | ||
| 2264 | bool traverse_valid = | ||
| 2265 | m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); | ||
| 2266 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 2267 | |||
| 2268 | // Set tracking variables. | ||
| 2269 | phys_addr = next_entry.phys_addr; | ||
| 2270 | phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); | ||
| 2271 | |||
| 2272 | // Iterate. | ||
| 2273 | while (true) { | ||
| 2274 | // Continue the traversal. | ||
| 2275 | traverse_valid = | ||
| 2276 | m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 2277 | if (!traverse_valid) { | ||
| 2278 | break; | ||
| 2279 | } | ||
| 2280 | |||
| 2281 | if (next_entry.phys_addr != (phys_addr + phys_size)) { | ||
| 2282 | // Check if we're done. | ||
| 2283 | if (virt_addr <= address && address <= virt_addr + phys_size - 1) { | ||
| 2284 | break; | ||
| 2285 | } | ||
| 2286 | |||
| 2287 | // Advance. | ||
| 2288 | phys_addr = next_entry.phys_addr; | ||
| 2289 | virt_addr += next_entry.block_size; | ||
| 2290 | phys_size = | ||
| 2291 | next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); | ||
| 2292 | } else { | ||
| 2293 | phys_size += next_entry.block_size; | ||
| 2294 | } | ||
| 2295 | |||
| 2296 | // Check if we're done. | ||
| 2297 | if (end_addr < virt_addr + phys_size) { | ||
| 2298 | break; | ||
| 2299 | } | ||
| 2300 | } | ||
| 2301 | ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1); | ||
| 2302 | |||
| 2303 | // Ensure we use the right size. | ||
| 2304 | if (end_addr < virt_addr + phys_size) { | ||
| 2305 | phys_size = end_addr - virt_addr; | ||
| 2306 | } | ||
| 2307 | } | ||
| 2308 | |||
| 2309 | // Set the output. | ||
| 2310 | out->physical_address = GetInteger(phys_addr); | ||
| 2311 | out->virtual_address = GetInteger(virt_addr); | ||
| 2312 | out->size = phys_size; | ||
| 2313 | R_SUCCEED(); | ||
| 2314 | } | ||
| 2315 | |||
| 2316 | Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, | ||
| 2317 | KPhysicalAddress phys_addr, size_t size, KMemoryState state, | ||
| 2318 | KMemoryPermission perm) { | ||
| 2319 | // Check pre-conditions. | ||
| 2320 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 2321 | ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize)); | ||
| 2322 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 2323 | ASSERT(size > 0); | ||
| 2324 | |||
| 2325 | R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress); | ||
| 2326 | const size_t num_pages = size / PageSize; | ||
| 2327 | const KPhysicalAddress last = phys_addr + size - 1; | ||
| 2328 | |||
| 2329 | // Get region extents. | ||
| 2330 | const KProcessAddress region_start = m_kernel_map_region_start; | ||
| 2331 | const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 2332 | const size_t region_num_pages = region_size / PageSize; | ||
| 2333 | |||
| 2334 | ASSERT(this->CanContain(region_start, region_size, state)); | ||
| 2335 | |||
| 2336 | // Locate the memory region. | ||
| 2337 | const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr); | ||
| 2338 | R_UNLESS(region != nullptr, ResultInvalidAddress); | ||
| 2339 | |||
| 2340 | ASSERT(region->Contains(GetInteger(phys_addr))); | ||
| 2341 | |||
| 2342 | // Ensure that the region is mappable. | ||
| 2343 | const bool is_rw = perm == KMemoryPermission::UserReadWrite; | ||
| 2344 | while (true) { | ||
| 2345 | // Check that the region exists. | ||
| 2346 | R_UNLESS(region != nullptr, ResultInvalidAddress); | ||
| 2347 | |||
| 2348 | // Check the region attributes. | ||
| 2349 | R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress); | ||
| 2350 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, | ||
| 2351 | ResultInvalidAddress); | ||
| 2352 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress); | ||
| 2353 | |||
| 2354 | // Check if we're done. | ||
| 2355 | if (GetInteger(last) <= region->GetLastAddress()) { | ||
| 2356 | break; | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | // Advance. | ||
| 2360 | region = region->GetNext(); | ||
| 2361 | } | ||
| 2362 | |||
| 2363 | // Select an address to map at. | ||
| 2364 | KProcessAddress addr = 0; | ||
| 2365 | { | ||
| 2366 | const size_t alignment = 4_KiB; | ||
| 2367 | const KPhysicalAddress aligned_phys = | ||
| 2368 | Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1; | ||
| 2369 | R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress); | ||
| 2370 | |||
| 2371 | const KPhysicalAddress last_aligned_paddr = | ||
| 2372 | Common::AlignDown(GetInteger(last) + 1, alignment) - 1; | ||
| 2373 | R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr), | ||
| 2374 | ResultInvalidAddress); | ||
| 2375 | |||
| 2376 | addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | ||
| 2377 | this->GetNumGuardPages()); | ||
| 2378 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2379 | } | ||
| 2380 | |||
| 2381 | // Check that we can map IO here. | ||
| 2382 | ASSERT(this->CanContain(addr, size, state)); | ||
| 2383 | R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | ||
| 2384 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2385 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2386 | |||
| 2387 | // Perform mapping operation. | ||
| 2388 | const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false, | ||
| 2389 | DisableMergeAttribute::DisableHead}; | ||
| 2390 | R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map, | ||
| 2391 | false)); | ||
| 2392 | |||
| 2393 | // Set the output address. | ||
| 2394 | *out = addr; | ||
| 2395 | |||
| 2396 | R_SUCCEED(); | ||
| 2397 | } | ||
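The address-selection block in MapIoImpl rejects physical ranges that do not contain at least one fully aligned unit: it computes the last byte of the first aligned unit at or above phys_addr, and the last byte of the last unit wholly inside the range, then requires both to land inside [phys_addr, last]. The same check as a standalone predicate (AlignUp/AlignDown are assumed to match Common's semantics):

    #include <cassert>
    #include <cstdint>

    std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
        return (v + a - 1) & ~(a - 1);
    }
    std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
        return v & ~(a - 1);
    }

    bool ContainsAlignedUnit(std::uint64_t phys, std::uint64_t size,
                             std::uint64_t alignment) {
        const std::uint64_t last = phys + size - 1;
        // Last byte of the first aligned unit at or above phys...
        const std::uint64_t aligned_first_end = AlignUp(phys, alignment) + alignment - 1;
        // ...and last byte of the last aligned unit wholly inside the range.
        const std::uint64_t aligned_last_end = AlignDown(last + 1, alignment) - 1;
        return aligned_first_end > phys && aligned_last_end <= last &&
               aligned_first_end <= aligned_last_end;
    }

    int main() {
        assert(ContainsAlignedUnit(0x1000, 0x2000, 0x1000));  // two full pages
        assert(!ContainsAlignedUnit(0x1800, 0x1000, 0x1000)); // straddles; no full page
        return 0;
    }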
| 2398 | |||
| 2399 | Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 2400 | // Lock the table. | ||
| 2401 | KScopedLightLock lk(m_general_lock); | ||
| 2402 | |||
| 2403 | // Create an update allocator. | ||
| 2404 | Result allocator_result; | ||
| 2405 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2406 | m_memory_block_slab_manager); | ||
| 2407 | R_TRY(allocator_result); | ||
| 2408 | |||
| 2409 | // We're going to perform an update, so create a helper. | ||
| 2410 | KScopedPageTableUpdater updater(this); | ||
| 2411 | |||
| 2412 | // Map the io memory. | ||
| 2413 | KProcessAddress addr; | ||
| 2414 | R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size, | ||
| 2415 | KMemoryState::IoRegister, perm)); | ||
| 2416 | |||
| 2417 | // Update the blocks. | ||
| 2418 | m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, | ||
| 2419 | KMemoryState::IoRegister, perm, KMemoryAttribute::Locked, | ||
| 2420 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2421 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2422 | |||
| 2423 | // We successfully mapped the pages. | ||
| 2424 | R_SUCCEED(); | ||
| 2425 | } | ||
| 2426 | |||
| 2427 | Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, | ||
| 2428 | size_t size, Svc::MemoryMapping mapping, | ||
| 2429 | Svc::MemoryPermission svc_perm) { | ||
| 2430 | const size_t num_pages = size / PageSize; | ||
| 2431 | |||
| 2432 | // Lock the table. | ||
| 2433 | KScopedLightLock lk(m_general_lock); | ||
| 2434 | |||
| 2435 | // Validate the memory state. | ||
| 2436 | size_t num_allocator_blocks; | ||
| 2437 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, | ||
| 2438 | KMemoryState::All, KMemoryState::None, KMemoryPermission::None, | ||
| 2439 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2440 | KMemoryAttribute::None)); | ||
| 2441 | |||
| 2442 | // Create an update allocator. | ||
| 2443 | Result allocator_result; | ||
| 2444 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2445 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2446 | R_TRY(allocator_result); | ||
| 2447 | |||
| 2448 | // We're going to perform an update, so create a helper. | ||
| 2449 | KScopedPageTableUpdater updater(this); | ||
| 2450 | |||
| 2451 | // Perform mapping operation. | ||
| 2452 | const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm); | ||
| 2453 | const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister, | ||
| 2454 | mapping == Svc::MemoryMapping::Uncached, | ||
| 2455 | DisableMergeAttribute::DisableHead}; | ||
| 2456 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties, | ||
| 2457 | OperationType::Map, false)); | ||
| 2458 | |||
| 2459 | // Update the blocks. | ||
| 2460 | const auto state = | ||
| 2461 | mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister; | ||
| 2462 | m_memory_block_manager.Update( | ||
| 2463 | std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked, | ||
| 2464 | KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None); | ||
| 2465 | |||
| 2466 | // We successfully mapped the pages. | ||
| 2467 | R_SUCCEED(); | ||
| 2468 | } | ||
| 2469 | |||
| 2470 | Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, | ||
| 2471 | size_t size, Svc::MemoryMapping mapping) { | ||
| 2472 | const size_t num_pages = size / PageSize; | ||
| 2473 | |||
| 2474 | // Lock the table. | ||
| 2475 | KScopedLightLock lk(m_general_lock); | ||
| 2476 | |||
| 2477 | // Validate the memory state. | ||
| 2478 | KMemoryState old_state; | ||
| 2479 | KMemoryPermission old_perm; | ||
| 2480 | KMemoryAttribute old_attr; | ||
| 2481 | size_t num_allocator_blocks; | ||
| 2482 | R_TRY(this->CheckMemoryState( | ||
| 2483 | std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), | ||
| 2484 | std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All, | ||
| 2485 | mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister, | ||
| 2486 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2487 | KMemoryAttribute::Locked)); | ||
| 2488 | |||
| 2489 | // Validate that the region being unmapped corresponds to the physical range described. | ||
| 2490 | { | ||
| 2491 | // Get the impl. | ||
| 2492 | auto& impl = this->GetImpl(); | ||
| 2493 | |||
| 2494 | // Begin traversal. | ||
| 2495 | TraversalContext context; | ||
| 2496 | TraversalEntry next_entry; | ||
| 2497 | ASSERT( | ||
| 2498 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address)); | ||
| 2499 | |||
| 2500 | // Check that the physical region matches. | ||
| 2501 | R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion); | ||
| 2502 | |||
| 2503 | // Iterate. | ||
| 2504 | for (size_t checked_size = | ||
| 2505 | next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); | ||
| 2506 | checked_size < size; checked_size += next_entry.block_size) { | ||
| 2507 | // Continue the traversal. | ||
| 2508 | ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))); | ||
| 2509 | |||
| 2510 | // Check that the physical region matches. | ||
| 2511 | R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion); | ||
| 2512 | } | ||
| 2513 | } | ||
| 2514 | |||
| 2515 | // Create an update allocator. | ||
| 2516 | Result allocator_result; | ||
| 2517 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2518 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2519 | R_TRY(allocator_result); | ||
| 2520 | |||
| 2521 | // We're going to perform an update, so create a helper. | ||
| 2522 | KScopedPageTableUpdater updater(this); | ||
| 2523 | |||
| 2524 | // If the region being unmapped is Memory, synchronize. | ||
| 2525 | if (mapping == Svc::MemoryMapping::Memory) { | ||
| 2526 | // Change the region to be uncached. | ||
| 2527 | const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None}; | ||
| 2528 | R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties, | ||
| 2529 | OperationType::ChangePermissionsAndRefresh, false)); | ||
| 2530 | |||
| 2531 | // Temporarily unlock ourselves, so that other operations can occur while we flush the | ||
| 2532 | // region. | ||
| 2533 | m_general_lock.Unlock(); | ||
| 2534 | SCOPE_EXIT({ m_general_lock.Lock(); }); | ||
| 2535 | |||
| 2536 | // Flush the region. | ||
| 2537 | R_ASSERT(FlushDataCache(dst_address, size)); | ||
| 2538 | } | ||
| 2539 | |||
| 2540 | // Perform the unmap. | ||
| 2541 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2542 | DisableMergeAttribute::None}; | ||
| 2543 | R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, | ||
| 2544 | unmap_properties, OperationType::Unmap, false)); | ||
| 2545 | |||
| 2546 | // Update the blocks. | ||
| 2547 | m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, | ||
| 2548 | KMemoryState::Free, KMemoryPermission::None, | ||
| 2549 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 2550 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2551 | |||
| 2552 | R_SUCCEED(); | ||
| 2553 | } | ||
| 2554 | |||
| 2555 | Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 2556 | ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize)); | ||
| 2557 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 2558 | ASSERT(size > 0); | ||
| 2559 | R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress); | ||
| 2560 | const size_t num_pages = size / PageSize; | ||
| 2561 | const KPhysicalAddress last = phys_addr + size - 1; | ||
| 2562 | |||
| 2563 | // Get region extents. | ||
| 2564 | const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static); | ||
| 2565 | const size_t region_size = this->GetRegionSize(KMemoryState::Static); | ||
| 2566 | const size_t region_num_pages = region_size / PageSize; | ||
| 2567 | |||
| 2568 | // Locate the memory region. | ||
| 2569 | const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr); | ||
| 2570 | R_UNLESS(region != nullptr, ResultInvalidAddress); | ||
| 2571 | |||
| 2572 | ASSERT(region->Contains(GetInteger(phys_addr))); | ||
| 2573 | R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress); | ||
| 2574 | |||
| 2575 | // Check the region attributes. | ||
| 2576 | const bool is_rw = perm == KMemoryPermission::UserReadWrite; | ||
| 2577 | R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress); | ||
| 2578 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress); | ||
| 2579 | R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, | ||
| 2580 | ResultInvalidAddress); | ||
| 2581 | |||
| 2582 | // Lock the table. | ||
| 2583 | KScopedLightLock lk(m_general_lock); | ||
| 2584 | |||
| 2585 | // Select an address to map at. | ||
| 2586 | KProcessAddress addr = 0; | ||
| 2587 | { | ||
| 2588 | const size_t alignment = 4_KiB; | ||
| 2589 | const KPhysicalAddress aligned_phys = | ||
| 2590 | Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1; | ||
| 2591 | R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress); | ||
| 2592 | |||
| 2593 | const KPhysicalAddress last_aligned_paddr = | ||
| 2594 | Common::AlignDown(GetInteger(last) + 1, alignment) - 1; | ||
| 2595 | R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr), | ||
| 2596 | ResultInvalidAddress); | ||
| 2597 | |||
| 2598 | addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | ||
| 2599 | this->GetNumGuardPages()); | ||
| 2600 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2601 | } | ||
| 2602 | |||
| 2603 | // Check that we can map static here. | ||
| 2604 | ASSERT(this->CanContain(addr, size, KMemoryState::Static)); | ||
| 2605 | R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | ||
| 2606 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 2607 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2608 | |||
| 2609 | // Create an update allocator. | ||
| 2610 | Result allocator_result; | ||
| 2611 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2612 | m_memory_block_slab_manager); | ||
| 2613 | R_TRY(allocator_result); | ||
| 2614 | |||
| 2615 | // We're going to perform an update, so create a helper. | ||
| 2616 | KScopedPageTableUpdater updater(this); | ||
| 2617 | |||
| 2618 | // Perform mapping operation. | ||
| 2619 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2620 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, | ||
| 2621 | OperationType::Map, false)); | ||
| 2622 | |||
| 2623 | // Update the blocks. | ||
| 2624 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static, | ||
| 2625 | perm, KMemoryAttribute::None, | ||
| 2626 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2627 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2628 | |||
| 2629 | // We successfully mapped the pages. | ||
| 2630 | R_SUCCEED(); | ||
| 2631 | } | ||
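The address-selection guards in MapStatic are easy to misread: `AlignUp(phys) + alignment - 1` is the last byte of the first aligned block beginning at or after `phys_addr`, and `AlignDown(last + 1) - 1` is the last byte of the last aligned block ending at or before `last`; together they require the physical range to contain at least one whole aligned block without the arithmetic wrapping. A compilable sketch of the same checks, assuming `Common::AlignUp`/`AlignDown` are the usual power-of-two masks (the helper names here are illustrative, not yuzu APIs):

```cpp
#include <cstdint>
#include <cstdio>

// Assumed behavior of Common::AlignUp/AlignDown for power-of-two alignments.
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return (value + align - 1) & ~(align - 1);
}
constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value & ~(align - 1);
}

// Mirrors the guards above: the range [phys, phys + size - 1] must contain at
// least one whole aligned block, and the intermediate arithmetic must not
// wrap. Assumes size > 0, as the function's ASSERTs establish.
bool HasAlignedSubRange(std::uint64_t phys, std::uint64_t size, std::uint64_t align) {
    const std::uint64_t last = phys + size - 1;
    // Last byte of the first aligned block beginning at or after phys.
    const std::uint64_t aligned_phys = AlignUp(phys, align) + align - 1;
    if (aligned_phys <= phys) {
        return false; // overflow wrapped around
    }
    // Last byte of the last aligned block ending at or before last.
    const std::uint64_t last_aligned = AlignDown(last + 1, align) - 1;
    return last_aligned <= last && aligned_phys <= last_aligned;
}

int main() {
    // A 16 KiB range starting on a 4 KiB boundary contains whole blocks...
    std::printf("%d\n", HasAlignedSubRange(0x1000, 0x4000, 0x1000)); // 1
    // ...but a half-page range contains no whole 4 KiB block.
    std::printf("%d\n", HasAlignedSubRange(0x1800, 0x800, 0x1000)); // 0
}
```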
| 2632 | |||
| 2633 | Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { | ||
| 2634 | // Get the memory region. | ||
| 2635 | const KMemoryRegion* region = | ||
| 2636 | m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type); | ||
| 2637 | R_UNLESS(region != nullptr, ResultOutOfRange); | ||
| 2638 | |||
| 2639 | // Check that the region is valid. | ||
| 2640 | ASSERT(region->GetEndAddress() != 0); | ||
| 2641 | |||
| 2642 | // Map the region. | ||
| 2643 | R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) { | ||
| 2644 | R_CONVERT(ResultInvalidAddress, ResultOutOfRange) } R_END_TRY_CATCH; | ||
| 2645 | |||
| 2646 | R_SUCCEED(); | ||
| 2647 | } | ||
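The `R_TRY_CATCH`/`R_CONVERT` pair in MapRegion means: if `MapStatic` fails specifically with `ResultInvalidAddress`, report `ResultOutOfRange` to the caller instead, while success and every other failure propagate unchanged. A plain-C++ model of that idiom, assuming only that `Result` is a comparable value type (the codes below are illustrative placeholders, not the kernel's values):

```cpp
#include <cstdint>

using Result = std::uint32_t;
constexpr Result ResultSuccess = 0;
constexpr Result ResultInvalidAddress = 0x101; // illustrative values only
constexpr Result ResultOutOfRange = 0x102;

// Models R_TRY_CATCH { R_CONVERT(from, to) } R_END_TRY_CATCH: run the call,
// translate one specific failure code, and pass everything else through.
template <typename F>
Result ConvertOnFailure(F&& f, Result from, Result to) {
    const Result r = f();
    return (r == from) ? to : r;
}

int main() {
    auto map_static = [] { return ResultInvalidAddress; }; // pretend MapStatic failed
    const Result r = ConvertOnFailure(map_static, ResultInvalidAddress, ResultOutOfRange);
    return r == ResultOutOfRange ? 0 : 1; // caller sees the converted code
}
```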
| 2648 | |||
| 2649 | Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 2650 | KPhysicalAddress phys_addr, bool is_pa_valid, | ||
| 2651 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2652 | KMemoryState state, KMemoryPermission perm) { | ||
| 2653 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | ||
| 2654 | |||
| 2655 | // Ensure this is a valid map request. | ||
| 2656 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2657 | ResultInvalidCurrentMemory); | ||
| 2658 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2659 | |||
| 2660 | // Lock the table. | ||
| 2661 | KScopedLightLock lk(m_general_lock); | ||
| 2662 | |||
| 2663 | // Find a random address to map at. | ||
| 2664 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, | ||
| 2665 | 0, this->GetNumGuardPages()); | ||
| 2666 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2667 | ASSERT(Common::IsAligned(GetInteger(addr), alignment)); | ||
| 2668 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2669 | R_ASSERT(this->CheckMemoryState( | ||
| 2670 | addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2671 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2672 | |||
| 2673 | // Create an update allocator. | ||
| 2674 | Result allocator_result; | ||
| 2675 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2676 | m_memory_block_slab_manager); | ||
| 2677 | R_TRY(allocator_result); | ||
| 2678 | |||
| 2679 | // We're going to perform an update, so create a helper. | ||
| 2680 | KScopedPageTableUpdater updater(this); | ||
| 2681 | |||
| 2682 | // Perform mapping operation. | ||
| 2683 | if (is_pa_valid) { | ||
| 2684 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2685 | R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, | ||
| 2686 | OperationType::Map, false)); | ||
| 2687 | } else { | ||
| 2688 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm)); | ||
| 2689 | } | ||
| 2690 | |||
| 2691 | // Update the blocks. | ||
| 2692 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2693 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2694 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2695 | |||
| 2696 | // We successfully mapped the pages. | ||
| 2697 | *out_addr = addr; | ||
| 2698 | R_SUCCEED(); | ||
| 2699 | } | ||
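Both `MapPages` overloads lean on `FindFreeArea` to pick the target address; the real implementation may randomize the choice (hence the "random address" comment), and guard pages keep neighboring mappings from abutting. As a rough model only, here is a deterministic first-fit sketch over a sorted list of used ranges; `FindFreeAreaFirstFit` and `UsedRange` are illustrative names, not yuzu APIs, and it conservatively applies guard slack at the region edges too:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

struct UsedRange {
    std::uint64_t addr;
    std::uint64_t num_pages;
};

// First-fit stand-in for FindFreeArea: scan the gaps between used ranges
// (sorted by address) for an aligned candidate with num_pages of room plus
// guard-page slack on both sides. Returns 0 when nothing fits, matching the
// "addr != 0" convention above.
std::uint64_t FindFreeAreaFirstFit(const std::vector<UsedRange>& used,
                                   std::uint64_t region_start, std::uint64_t region_num_pages,
                                   std::uint64_t num_pages, std::uint64_t alignment,
                                   std::uint64_t num_guard_pages) {
    const std::uint64_t region_end = region_start + region_num_pages * PageSize;
    std::uint64_t cursor = region_start;
    auto try_gap = [&](std::uint64_t gap_end) -> std::uint64_t {
        std::uint64_t candidate = cursor + num_guard_pages * PageSize;
        candidate = (candidate + alignment - 1) & ~(alignment - 1); // align up
        const std::uint64_t needed_end = candidate + (num_pages + num_guard_pages) * PageSize;
        return (candidate >= cursor && needed_end <= gap_end) ? candidate : 0;
    };
    for (const UsedRange& r : used) {
        if (const std::uint64_t addr = try_gap(r.addr)) {
            return addr;
        }
        cursor = std::max(cursor, r.addr + r.num_pages * PageSize);
    }
    return try_gap(region_end); // final gap, up to the region's end
}

int main() {
    const std::vector<UsedRange> used{{0x10000, 4}};
    // Two pages, page-aligned, one guard page: lands below the used range.
    return FindFreeAreaFirstFit(used, 0x0, 0x100, 2, PageSize, 1) == 0x1000 ? 0 : 1;
}
```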
| 2700 | |||
| 2701 | Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 2702 | KMemoryPermission perm) { | ||
| 2703 | // Check that the map is in range. | ||
| 2704 | const size_t size = num_pages * PageSize; | ||
| 2705 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2706 | |||
| 2707 | // Lock the table. | ||
| 2708 | KScopedLightLock lk(m_general_lock); | ||
| 2709 | |||
| 2710 | // Check the memory state. | ||
| 2711 | size_t num_allocator_blocks; | ||
| 2712 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2713 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2714 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2715 | KMemoryAttribute::None)); | ||
| 2716 | |||
| 2717 | // Create an update allocator. | ||
| 2718 | Result allocator_result; | ||
| 2719 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2720 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2721 | R_TRY(allocator_result); | ||
| 2722 | |||
| 2723 | // We're going to perform an update, so create a helper. | ||
| 2724 | KScopedPageTableUpdater updater(this); | ||
| 2725 | |||
| 2726 | // Map the pages. | ||
| 2727 | R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm)); | ||
| 2728 | |||
| 2729 | // Update the blocks. | ||
| 2730 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, | ||
| 2731 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2732 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2733 | |||
| 2734 | R_SUCCEED(); | ||
| 2735 | } | ||
| 2736 | |||
| 2737 | Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { | ||
| 2738 | // Check that the unmap is in range. | ||
| 2739 | const size_t size = num_pages * PageSize; | ||
| 2740 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2741 | |||
| 2742 | // Lock the table. | ||
| 2743 | KScopedLightLock lk(m_general_lock); | ||
| 2744 | |||
| 2745 | // Check the memory state. | ||
| 2746 | size_t num_allocator_blocks; | ||
| 2747 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2748 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2749 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2750 | KMemoryAttribute::None)); | ||
| 2751 | |||
| 2752 | // Create an update allocator. | ||
| 2753 | Result allocator_result; | ||
| 2754 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2755 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2756 | R_TRY(allocator_result); | ||
| 2757 | |||
| 2758 | // We're going to perform an update, so create a helper. | ||
| 2759 | KScopedPageTableUpdater updater(this); | ||
| 2760 | |||
| 2761 | // Perform the unmap. | ||
| 2762 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 2763 | DisableMergeAttribute::None}; | ||
| 2764 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties, | ||
| 2765 | OperationType::Unmap, false)); | ||
| 2766 | |||
| 2767 | // Update the blocks. | ||
| 2768 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2769 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2770 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2771 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2772 | |||
| 2773 | R_SUCCEED(); | ||
| 2774 | } | ||
| 2775 | |||
| 2776 | Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 2777 | KProcessAddress region_start, size_t region_num_pages, | ||
| 2778 | KMemoryState state, KMemoryPermission perm) { | ||
| 2779 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2780 | |||
| 2781 | // Ensure this is a valid map request. | ||
| 2782 | const size_t num_pages = pg.GetNumPages(); | ||
| 2783 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 2784 | ResultInvalidCurrentMemory); | ||
| 2785 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 2786 | |||
| 2787 | // Lock the table. | ||
| 2788 | KScopedLightLock lk(m_general_lock); | ||
| 2789 | |||
| 2790 | // Find a random address to map at. | ||
| 2791 | KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, | ||
| 2792 | 0, this->GetNumGuardPages()); | ||
| 2793 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 2794 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 2795 | R_ASSERT(this->CheckMemoryState( | ||
| 2796 | addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2797 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 2798 | |||
| 2799 | // Create an update allocator. | ||
| 2800 | Result allocator_result; | ||
| 2801 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2802 | m_memory_block_slab_manager); | ||
| 2803 | R_TRY(allocator_result); | ||
| 2804 | |||
| 2805 | // We're going to perform an update, so create a helper. | ||
| 2806 | KScopedPageTableUpdater updater(this); | ||
| 2807 | |||
| 2808 | // Perform mapping operation. | ||
| 2809 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2810 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2811 | |||
| 2812 | // Update the blocks. | ||
| 2813 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2814 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2815 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2816 | |||
| 2817 | // We successfully mapped the pages. | ||
| 2818 | *out_addr = addr; | ||
| 2819 | R_SUCCEED(); | ||
| 2820 | } | ||
| 2821 | |||
| 2822 | Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, | ||
| 2823 | KMemoryPermission perm) { | ||
| 2824 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2825 | |||
| 2826 | // Ensure this is a valid map request. | ||
| 2827 | const size_t num_pages = pg.GetNumPages(); | ||
| 2828 | const size_t size = num_pages * PageSize; | ||
| 2829 | R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); | ||
| 2830 | |||
| 2831 | // Lock the table. | ||
| 2832 | KScopedLightLock lk(m_general_lock); | ||
| 2833 | |||
| 2834 | // Check if state allows us to map. | ||
| 2835 | size_t num_allocator_blocks; | ||
| 2836 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, | ||
| 2837 | KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, | ||
| 2838 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2839 | KMemoryAttribute::None)); | ||
| 2840 | |||
| 2841 | // Create an update allocator. | ||
| 2842 | Result allocator_result; | ||
| 2843 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2844 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2845 | R_TRY(allocator_result); | ||
| 2846 | |||
| 2847 | // We're going to perform an update, so create a helper. | ||
| 2848 | KScopedPageTableUpdater updater(this); | ||
| 2849 | |||
| 2850 | // Perform mapping operation. | ||
| 2851 | const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead}; | ||
| 2852 | R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); | ||
| 2853 | |||
| 2854 | // Update the blocks. | ||
| 2855 | m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, | ||
| 2856 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, | ||
| 2857 | KMemoryBlockDisableMergeAttribute::None); | ||
| 2858 | |||
| 2859 | // We successfully mapped the pages. | ||
| 2860 | R_SUCCEED(); | ||
| 2861 | } | ||
| 2862 | |||
| 2863 | Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, | ||
| 2864 | KMemoryState state) { | ||
| 2865 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 2866 | |||
| 2867 | // Ensure this is a valid unmap request. | ||
| 2868 | const size_t num_pages = pg.GetNumPages(); | ||
| 2869 | const size_t size = num_pages * PageSize; | ||
| 2870 | R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); | ||
| 2871 | |||
| 2872 | // Lock the table. | ||
| 2873 | KScopedLightLock lk(m_general_lock); | ||
| 2874 | |||
| 2875 | // Check if state allows us to unmap. | ||
| 2876 | size_t num_allocator_blocks; | ||
| 2877 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 2878 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 2879 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2880 | KMemoryAttribute::None)); | ||
| 2881 | |||
| 2882 | // Check that the page group is valid. | ||
| 2883 | R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory); | ||
| 2884 | |||
| 2885 | // Create an update allocator. | ||
| 2886 | Result allocator_result; | ||
| 2887 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 2888 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 2889 | R_TRY(allocator_result); | ||
| 2890 | |||
| 2891 | // We're going to perform an update, so create a helper. | ||
| 2892 | KScopedPageTableUpdater updater(this); | ||
| 2893 | |||
| 2894 | // Perform unmapping operation. | ||
| 2895 | const KPageProperties properties = {KMemoryPermission::None, false, false, | ||
| 2896 | DisableMergeAttribute::None}; | ||
| 2897 | R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties, | ||
| 2898 | OperationType::Unmap, false)); | ||
| 2899 | |||
| 2900 | // Update the blocks. | ||
| 2901 | m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, | ||
| 2902 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 2903 | KMemoryBlockDisableMergeAttribute::None, | ||
| 2904 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 2905 | |||
| 2906 | R_SUCCEED(); | ||
| 2907 | } | ||
| 2908 | |||
| 2909 | Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, | ||
| 2910 | size_t num_pages, KMemoryState state_mask, | ||
| 2911 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 2912 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 2913 | KMemoryAttribute attr) { | ||
| 2914 | // Ensure that the page group isn't null. | ||
| 2915 | ASSERT(out != nullptr); | ||
| 2916 | |||
| 2917 | // Make sure that the region we're mapping is valid for the table. | ||
| 2918 | const size_t size = num_pages * PageSize; | ||
| 2919 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2920 | |||
| 2921 | // Lock the table. | ||
| 2922 | KScopedLightLock lk(m_general_lock); | ||
| 2923 | |||
| 2924 | // Check if state allows us to create the group. | ||
| 2925 | R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, | ||
| 2926 | state | KMemoryState::FlagReferenceCounted, perm_mask, perm, | ||
| 2927 | attr_mask, attr)); | ||
| 2928 | |||
| 2929 | // Create a new page group for the region. | ||
| 2930 | R_TRY(this->MakePageGroup(*out, address, num_pages)); | ||
| 2931 | |||
| 2932 | // Open a new reference to the pages in the group. | ||
| 2933 | out->Open(); | ||
| 2934 | |||
| 2935 | R_SUCCEED(); | ||
| 2936 | } | ||
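The `out->Open()` at the end of MakeAndOpenPageGroup is what keeps the pages alive: a page group holds a reference on every page it describes, so the physical memory cannot be freed and reused while the group exists, and a matching `Close()` releases those references. A toy sketch of that discipline, with one aggregated counter standing in for the page heap's per-page bookkeeping:

```cpp
#include <cassert>
#include <cstddef>

struct Page {
    std::size_t refs = 0; // stand-in for the page heap's per-page refcount
};

// Minimal model of the KPageGroup Open/Close reference discipline.
struct PageGroupSketch {
    Page* pages;
    std::size_t num_pages;

    void Open() {
        pages->refs += num_pages; // one reference per described page
    }
    void Close() {
        assert(pages->refs >= num_pages);
        pages->refs -= num_pages; // release them once the caller is done
    }
};

int main() {
    Page dram{};
    PageGroupSketch pg{&dram, 4};
    pg.Open(); // as in MakeAndOpenPageGroup: group created, then opened
    assert(dram.refs == 4);
    pg.Close();
    assert(dram.refs == 0);
}
```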
| 2937 | |||
| 2938 | Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) { | ||
| 2939 | // Check that the region is in range. | ||
| 2940 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 2941 | |||
| 2942 | // Lock the table. | ||
| 2943 | KScopedLightLock lk(m_general_lock); | ||
| 2944 | |||
| 2945 | // Check the memory state. | ||
| 2946 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 2947 | address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, | ||
| 2948 | KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite, | ||
| 2949 | KMemoryAttribute::Uncached, KMemoryAttribute::None)); | ||
| 2950 | |||
| 2951 | // Get the impl. | ||
| 2952 | auto& impl = this->GetImpl(); | ||
| 2953 | |||
| 2954 | // Begin traversal. | ||
| 2955 | TraversalContext context; | ||
| 2956 | TraversalEntry next_entry; | ||
| 2957 | bool traverse_valid = | ||
| 2958 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address); | ||
| 2959 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 2960 | |||
| 2961 | // Prepare tracking variables. | ||
| 2962 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 2963 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 2964 | size_t tot_size = cur_size; | ||
| 2965 | |||
| 2966 | // Iterate. | ||
| 2967 | while (tot_size < size) { | ||
| 2968 | // Continue the traversal. | ||
| 2969 | traverse_valid = | ||
| 2970 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 2971 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 2972 | |||
| 2973 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 2974 | // Check that the pages are linearly mapped. | ||
| 2975 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 2976 | |||
| 2977 | // Invalidate the block. | ||
| 2978 | if (cur_size > 0) { | ||
| 2979 | // NOTE: Nintendo does not check the result of invalidation. | ||
| 2980 | InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 2981 | } | ||
| 2982 | |||
| 2983 | // Advance. | ||
| 2984 | cur_addr = next_entry.phys_addr; | ||
| 2985 | cur_size = next_entry.block_size; | ||
| 2986 | } else { | ||
| 2987 | cur_size += next_entry.block_size; | ||
| 2988 | } | ||
| 2989 | |||
| 2990 | tot_size += next_entry.block_size; | ||
| 2991 | } | ||
| 2992 | |||
| 2993 | // Ensure we use the right size for the last block. | ||
| 2994 | if (tot_size > size) { | ||
| 2995 | cur_size -= (tot_size - size); | ||
| 2996 | } | ||
| 2997 | |||
| 2998 | // Check that the last block is linearly mapped. | ||
| 2999 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3000 | |||
| 3001 | // Invalidate the last block. | ||
| 3002 | if (cur_size > 0) { | ||
| 3003 | // NOTE: Nintendo does not check the result of invalidation. | ||
| 3004 | InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 3005 | } | ||
| 3006 | |||
| 3007 | R_SUCCEED(); | ||
| 3008 | } | ||
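This traversal shape repeats throughout the file (here, in `ReadDebugMemory`/`WriteDebugMemory`, and in the `CopyMemory*` family): walk the table block by block, extend the current run while the physical addresses stay contiguous, flush a run when continuity breaks, and trim the final run so it never reaches past the requested size. A standalone sketch of just that bookkeeping, assuming the entry list starts exactly at the requested address and covers at least `size` bytes (the real code additionally subtracts the in-block offset from the first entry):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <vector>

struct Entry {
    std::uint64_t phys_addr;
    std::size_t block_size;
};

// Coalesce physically contiguous blocks into runs and invoke flush() once per
// run; the final run is trimmed to the requested size.
void ForEachContiguousRun(const std::vector<Entry>& entries, std::size_t size,
                          const std::function<void(std::uint64_t, std::size_t)>& flush) {
    std::uint64_t cur_addr = entries[0].phys_addr;
    std::size_t cur_size = entries[0].block_size;
    std::size_t tot_size = cur_size;
    for (std::size_t i = 1; tot_size < size; ++i) {
        const Entry& next = entries[i];
        if (next.phys_addr != cur_addr + cur_size) {
            flush(cur_addr, cur_size); // discontiguous: emit the finished run
            cur_addr = next.phys_addr;
            cur_size = next.block_size;
        } else {
            cur_size += next.block_size; // contiguous: extend the current run
        }
        tot_size += next.block_size;
    }
    if (tot_size > size) {
        cur_size -= tot_size - size; // never operate past the requested size
    }
    flush(cur_addr, cur_size);
}

int main() {
    // Two contiguous 4 KiB pages, then a discontiguous one; request 0x2800 bytes.
    ForEachContiguousRun({{0x10000, 0x1000}, {0x11000, 0x1000}, {0x30000, 0x1000}}, 0x2800,
                         [](std::uint64_t addr, std::size_t len) {
                             std::printf("flush %#llx (+%#zx)\n",
                                         static_cast<unsigned long long>(addr), len);
                         }); // prints: flush 0x10000 (+0x2000), flush 0x30000 (+0x800)
}
```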
| 3009 | |||
| 3010 | Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) { | ||
| 3011 | // Check pre-condition: this is being called on the current process. | ||
| 3012 | ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable())); | ||
| 3013 | |||
| 3014 | // Check that the region is in range. | ||
| 3015 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3016 | |||
| 3017 | // Lock the table. | ||
| 3018 | KScopedLightLock lk(m_general_lock); | ||
| 3019 | |||
| 3020 | // Check the memory state. | ||
| 3021 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3022 | address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, | ||
| 3023 | KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite, | ||
| 3024 | KMemoryAttribute::Uncached, KMemoryAttribute::None)); | ||
| 3025 | |||
| 3026 | // Invalidate the data cache. | ||
| 3027 | R_RETURN(InvalidateDataCache(address, size)); | ||
| 3028 | } | ||
| 3029 | |||
| 3030 | Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3031 | size_t size) { | ||
| 3032 | // Lightly validate the region is in range. | ||
| 3033 | R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory); | ||
| 3034 | |||
| 3035 | // Lock the table. | ||
| 3036 | KScopedLightLock lk(m_general_lock); | ||
| 3037 | |||
| 3038 | // Require that the memory either be user readable or debuggable. | ||
| 3039 | const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3040 | src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead, | ||
| 3041 | KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3042 | if (!can_read) { | ||
| 3043 | const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3044 | src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug, | ||
| 3045 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, | ||
| 3046 | KMemoryAttribute::None)); | ||
| 3047 | R_UNLESS(can_debug, ResultInvalidCurrentMemory); | ||
| 3048 | } | ||
| 3049 | |||
| 3050 | // Get the impl. | ||
| 3051 | auto& impl = this->GetImpl(); | ||
| 3052 | auto& dst_memory = GetCurrentMemory(m_system.Kernel()); | ||
| 3053 | |||
| 3054 | // Begin traversal. | ||
| 3055 | TraversalContext context; | ||
| 3056 | TraversalEntry next_entry; | ||
| 3057 | bool traverse_valid = | ||
| 3058 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address); | ||
| 3059 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 3060 | |||
| 3061 | // Prepare tracking variables. | ||
| 3062 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3063 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3064 | size_t tot_size = cur_size; | ||
| 3065 | |||
| 3066 | auto PerformCopy = [&]() -> Result { | ||
| 3067 | // Ensure the address is linear mapped. | ||
| 3068 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3069 | |||
| 3070 | // Copy as much aligned data as we can. | ||
| 3071 | if (cur_size >= sizeof(u32)) { | ||
| 3072 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3073 | const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3074 | FlushDataCache(copy_src, copy_size); | ||
| 3075 | R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer); | ||
| 3076 | |||
| 3077 | dst_address += copy_size; | ||
| 3078 | cur_addr += copy_size; | ||
| 3079 | cur_size -= copy_size; | ||
| 3080 | } | ||
| 3081 | |||
| 3082 | // Copy remaining data. | ||
| 3083 | if (cur_size > 0) { | ||
| 3084 | const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3085 | FlushDataCache(copy_src, cur_size); | ||
| 3086 | R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer); | ||
| 3087 | } | ||
| 3088 | |||
| 3089 | R_SUCCEED(); | ||
| 3090 | }; | ||
| 3091 | |||
| 3092 | // Iterate. | ||
| 3093 | while (tot_size < size) { | ||
| 3094 | // Continue the traversal. | ||
| 3095 | traverse_valid = | ||
| 3096 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3097 | ASSERT(traverse_valid); | ||
| 3098 | |||
| 3099 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3100 | // Perform copy. | ||
| 3101 | R_TRY(PerformCopy()); | ||
| 3102 | |||
| 3103 | // Advance. | ||
| 3104 | dst_address += cur_size; | ||
| 3105 | |||
| 3106 | cur_addr = next_entry.phys_addr; | ||
| 3107 | cur_size = next_entry.block_size; | ||
| 3108 | } else { | ||
| 3109 | cur_size += next_entry.block_size; | ||
| 3110 | } | ||
| 3111 | |||
| 3112 | tot_size += next_entry.block_size; | ||
| 3113 | } | ||
| 3114 | |||
| 3115 | // Ensure we use the right size for the last block. | ||
| 3116 | if (tot_size > size) { | ||
| 3117 | cur_size -= (tot_size - size); | ||
| 3118 | } | ||
| 3119 | |||
| 3120 | // Perform copy for the last block. | ||
| 3121 | R_TRY(PerformCopy()); | ||
| 3122 | |||
| 3123 | R_SUCCEED(); | ||
| 3124 | } | ||
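Within each run, `PerformCopy` splits the transfer into a `u32`-aligned bulk portion and a byte tail, so most of the copy proceeds in aligned words. The same split, isolated, with `std::memcpy` standing in for the guest-memory `WriteBlock`/`ReadBlock` calls:

```cpp
#include <cstdint>
#include <cstring>

// Copy size bytes as an aligned bulk (AlignDown(size, 4)) followed by a
// 0-3 byte tail, mirroring PerformCopy's structure.
void CopyAlignedThenTail(std::uint8_t* dst, const std::uint8_t* src, std::size_t size) {
    const std::size_t bulk = size & ~(sizeof(std::uint32_t) - 1);
    if (bulk > 0) {
        std::memcpy(dst, src, bulk); // word-aligned portion
    }
    if (size > bulk) {
        std::memcpy(dst + bulk, src + bulk, size - bulk); // remaining tail
    }
}

int main() {
    std::uint8_t src[11] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    std::uint8_t dst[11] = {};
    CopyAlignedThenTail(dst, src, sizeof(src)); // 8-byte bulk + 3-byte tail
    return std::memcmp(dst, src, sizeof(src)) == 0 ? 0 : 1;
}
```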
| 3125 | |||
| 3126 | Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3127 | size_t size) { | ||
| 3128 | // Lightly validate the region is in range. | ||
| 3129 | R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory); | ||
| 3130 | |||
| 3131 | // Lock the table. | ||
| 3132 | KScopedLightLock lk(m_general_lock); | ||
| 3133 | |||
| 3134 | // Require that the memory either be user writable or debuggable. | ||
| 3135 | const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3136 | dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite, | ||
| 3137 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3138 | if (!can_write) { | ||
| 3139 | const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous( | ||
| 3140 | dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug, | ||
| 3141 | KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, | ||
| 3142 | KMemoryAttribute::None)); | ||
| 3143 | R_UNLESS(can_debug, ResultInvalidCurrentMemory); | ||
| 3144 | } | ||
| 3145 | |||
| 3146 | // Get the impl. | ||
| 3147 | auto& impl = this->GetImpl(); | ||
| 3148 | auto& src_memory = GetCurrentMemory(m_system.Kernel()); | ||
| 3149 | |||
| 3150 | // Begin traversal. | ||
| 3151 | TraversalContext context; | ||
| 3152 | TraversalEntry next_entry; | ||
| 3153 | bool traverse_valid = | ||
| 3154 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address); | ||
| 3155 | R_UNLESS(traverse_valid, ResultInvalidCurrentMemory); | ||
| 3156 | |||
| 3157 | // Prepare tracking variables. | ||
| 3158 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3159 | size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3160 | size_t tot_size = cur_size; | ||
| 3161 | |||
| 3162 | auto PerformCopy = [&]() -> Result { | ||
| 3163 | // Ensure the address is linear mapped. | ||
| 3164 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3165 | |||
| 3166 | // Copy as much aligned data as we can. | ||
| 3167 | if (cur_size >= sizeof(u32)) { | ||
| 3168 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3169 | void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3170 | R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size), | ||
| 3171 | ResultInvalidCurrentMemory); | ||
| 3172 | |||
| 3173 | StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size); | ||
| 3174 | |||
| 3175 | src_address += copy_size; | ||
| 3176 | cur_addr += copy_size; | ||
| 3177 | cur_size -= copy_size; | ||
| 3178 | } | ||
| 3179 | |||
| 3180 | // Copy remaining data. | ||
| 3181 | if (cur_size > 0) { | ||
| 3182 | void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr); | ||
| 3183 | R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size), | ||
| 3184 | ResultInvalidCurrentMemory); | ||
| 3185 | |||
| 3186 | StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 3187 | } | ||
| 3188 | |||
| 3189 | R_SUCCEED(); | ||
| 3190 | }; | ||
| 3191 | |||
| 3192 | // Iterate. | ||
| 3193 | while (tot_size < size) { | ||
| 3194 | // Continue the traversal. | ||
| 3195 | traverse_valid = | ||
| 3196 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3197 | ASSERT(traverse_valid); | ||
| 3198 | |||
| 3199 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3200 | // Perform copy. | ||
| 3201 | R_TRY(PerformCopy()); | ||
| 3202 | |||
| 3203 | // Advance. | ||
| 3204 | src_address += cur_size; | ||
| 3205 | |||
| 3206 | cur_addr = next_entry.phys_addr; | ||
| 3207 | cur_size = next_entry.block_size; | ||
| 3208 | } else { | ||
| 3209 | cur_size += next_entry.block_size; | ||
| 3210 | } | ||
| 3211 | |||
| 3212 | tot_size += next_entry.block_size; | ||
| 3213 | } | ||
| 3214 | |||
| 3215 | // Ensure we use the right size for the last block. | ||
| 3216 | if (tot_size > size) { | ||
| 3217 | cur_size -= (tot_size - size); | ||
| 3218 | } | ||
| 3219 | |||
| 3220 | // Perform copy for the last block. | ||
| 3221 | R_TRY(PerformCopy()); | ||
| 3222 | |||
| 3223 | // Invalidate the instruction cache, as this svc allows modifying executable pages. | ||
| 3224 | InvalidateInstructionCache(m_system, dst_address, size); | ||
| 3225 | |||
| 3226 | R_SUCCEED(); | ||
| 3227 | } | ||
| 3228 | |||
| 3229 | Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, | ||
| 3230 | size_t size, KMemoryState state) { | ||
| 3231 | // Check pre-conditions. | ||
| 3232 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3233 | |||
| 3234 | // Determine the mapping extents. | ||
| 3235 | const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize); | ||
| 3236 | const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize); | ||
| 3237 | const size_t map_size = map_end - map_start; | ||
| 3238 | |||
| 3239 | // Get the memory reference to write into. | ||
| 3240 | auto& dst_memory = GetCurrentMemory(m_kernel); | ||
| 3241 | |||
| 3242 | // We're going to perform an update, so create a helper. | ||
| 3243 | KScopedPageTableUpdater updater(this); | ||
| 3244 | |||
| 3245 | // Temporarily map the io memory. | ||
| 3246 | KProcessAddress io_addr; | ||
| 3247 | R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, | ||
| 3248 | state, KMemoryPermission::UserRead)); | ||
| 3249 | |||
| 3250 | // Ensure we unmap the io memory when we're done with it. | ||
| 3251 | const KPageProperties unmap_properties = | ||
| 3252 | KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None}; | ||
| 3253 | SCOPE_EXIT({ | ||
| 3254 | R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false, | ||
| 3255 | unmap_properties, OperationType::Unmap, true)); | ||
| 3256 | }); | ||
| 3257 | |||
| 3258 | // Read the memory. | ||
| 3259 | const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1)); | ||
| 3260 | dst_memory.CopyBlock(dst_addr, read_addr, size); | ||
| 3261 | |||
| 3262 | R_SUCCEED(); | ||
| 3263 | } | ||
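The `SCOPE_EXIT` in ReadIoMemoryImpl (and its write counterpart below) is what guarantees the temporary IO window is unmapped on every path out of the function, including each early return. A minimal scope-guard sketch of that idiom (this shows the general pattern, not yuzu's actual `SCOPE_EXIT` implementation):

```cpp
#include <cstdio>
#include <utility>

// Run a callable when the enclosing scope ends, however it ends.
template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : m_f(std::forward<F>(f)) {}
    ~ScopeExit() {
        m_f();
    }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F m_f;
};

int main() {
    std::puts("map temporary io window");
    ScopeExit unmap{[] { std::puts("unmap temporary io window"); }};
    std::puts("copy through the window"); // the guard fires after this line
}
```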
| 3264 | |||
| 3265 | Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, | ||
| 3266 | size_t size, KMemoryState state) { | ||
| 3267 | // Check pre-conditions. | ||
| 3268 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 3269 | |||
| 3270 | // Determine the mapping extents. | ||
| 3271 | const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize); | ||
| 3272 | const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize); | ||
| 3273 | const size_t map_size = map_end - map_start; | ||
| 3274 | |||
| 3275 | // Get the memory reference to read from. | ||
| 3276 | auto& src_memory = GetCurrentMemory(m_kernel); | ||
| 3277 | |||
| 3278 | // We're going to perform an update, so create a helper. | ||
| 3279 | KScopedPageTableUpdater updater(this); | ||
| 3280 | |||
| 3281 | // Temporarily map the io memory. | ||
| 3282 | KProcessAddress io_addr; | ||
| 3283 | R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, | ||
| 3284 | state, KMemoryPermission::UserReadWrite)); | ||
| 3285 | |||
| 3286 | // Ensure we unmap the io memory when we're done with it. | ||
| 3287 | const KPageProperties unmap_properties = | ||
| 3288 | KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None}; | ||
| 3289 | SCOPE_EXIT({ | ||
| 3290 | R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false, | ||
| 3291 | unmap_properties, OperationType::Unmap, true)); | ||
| 3292 | }); | ||
| 3293 | |||
| 3294 | // Write the memory. | ||
| 3295 | const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1)); | ||
| 3296 | R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer); | ||
| 3297 | |||
| 3298 | R_SUCCEED(); | ||
| 3299 | } | ||
| 3300 | |||
| 3301 | Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3302 | size_t size, KMemoryState state) { | ||
| 3303 | // Lightly validate the range before doing anything else. | ||
| 3304 | R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory); | ||
| 3305 | |||
| 3306 | // We need to lock both this table, and the current process's table, so set up some aliases. | ||
| 3307 | KPageTableBase& src_page_table = *this; | ||
| 3308 | KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable(); | ||
| 3309 | |||
| 3310 | // Acquire the table locks. | ||
| 3311 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 3312 | |||
| 3313 | // Check that the desired range is readable io memory. | ||
| 3314 | R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state, | ||
| 3315 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, | ||
| 3316 | KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3317 | |||
| 3318 | // Read the memory. | ||
| 3319 | KProcessAddress dst = dst_address; | ||
| 3320 | const KProcessAddress last_address = src_address + size - 1; | ||
| 3321 | while (src_address <= last_address) { | ||
| 3322 | // Get the current physical address. | ||
| 3323 | KPhysicalAddress phys_addr; | ||
| 3324 | ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address)); | ||
| 3325 | |||
| 3326 | // Determine the current read size. | ||
| 3327 | const size_t cur_size = | ||
| 3328 | std::min<size_t>(last_address - src_address + 1, | ||
| 3329 | Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) - | ||
| 3330 | GetInteger(src_address)); | ||
| 3331 | |||
| 3332 | // Read. | ||
| 3333 | R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state)); | ||
| 3334 | |||
| 3335 | // Advance. | ||
| 3336 | src_address += cur_size; | ||
| 3337 | dst += cur_size; | ||
| 3338 | } | ||
| 3339 | |||
| 3340 | R_SUCCEED(); | ||
| 3341 | } | ||
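The `cur_size` expression in the loop clamps every chunk to the current page: `AlignDown(address + PageSize, PageSize)` is the next page boundary, and since each page may translate to a different physical address, no chunk may straddle two pages. The same computation, isolated and checked:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PageSize = 0x1000;

// Bytes to process in this step: whatever remains, clamped to the next page
// boundary so each chunk stays within a single translation unit.
std::uint64_t NextChunkSize(std::uint64_t address, std::uint64_t last_address) {
    const std::uint64_t remaining = last_address - address + 1;
    const std::uint64_t boundary = (address + PageSize) & ~(PageSize - 1);
    return std::min(remaining, boundary - address);
}

int main() {
    std::uint64_t addr = 0xFD00; // starts 0x300 bytes before a page boundary
    const std::uint64_t last = addr + 0x1000 - 1;
    while (addr <= last) {
        const std::uint64_t cur = NextChunkSize(addr, last);
        std::printf("chunk at %#llx: %#llx bytes\n",
                    static_cast<unsigned long long>(addr),
                    static_cast<unsigned long long>(cur)); // 0x300, then 0xd00
        addr += cur;
    }
}
```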
| 3342 | |||
| 3343 | Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, | ||
| 3344 | size_t size, KMemoryState state) { | ||
| 3345 | // Lightly validate the range before doing anything else. | ||
| 3346 | R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory); | ||
| 3347 | |||
| 3348 | // We need to lock both this table, and the current process's table, so set up some aliases. | ||
| 3349 | KPageTableBase& src_page_table = *this; | ||
| 3350 | KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable(); | ||
| 3351 | |||
| 3352 | // Acquire the table locks. | ||
| 3353 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 3354 | |||
| 3355 | // Check that the desired range is writable io memory. | ||
| 3356 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3357 | dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite, | ||
| 3358 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None)); | ||
| 3359 | |||
| 3360 | // Write the memory. | ||
| 3361 | KProcessAddress src = src_address; | ||
| 3362 | const KProcessAddress last_address = dst_address + size - 1; | ||
| 3363 | while (dst_address <= last_address) { | ||
| 3364 | // Get the current physical address. | ||
| 3365 | KPhysicalAddress phys_addr; | ||
| 3366 | ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address)); | ||
| 3367 | |||
| 3368 | // Determine the current write size. | ||
| 3369 | const size_t cur_size = | ||
| 3370 | std::min<size_t>(last_address - dst_address + 1, | ||
| 3371 | Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) - | ||
| 3372 | GetInteger(dst_address)); | ||
| 3373 | |||
| 3374 | // Write. | ||
| 3375 | R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state)); | ||
| 3376 | |||
| 3377 | // Advance. | ||
| 3378 | dst_address += cur_size; | ||
| 3379 | src += cur_size; | ||
| 3380 | } | ||
| 3381 | |||
| 3382 | R_SUCCEED(); | ||
| 3383 | } | ||
| 3384 | |||
| 3385 | Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, | ||
| 3386 | size_t size, KMemoryPermission perm, | ||
| 3387 | bool is_aligned, bool check_heap) { | ||
| 3388 | // Lightly validate the range before doing anything else. | ||
| 3389 | const size_t num_pages = size / PageSize; | ||
| 3390 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3391 | |||
| 3392 | // Lock the table. | ||
| 3393 | KScopedLightLock lk(m_general_lock); | ||
| 3394 | |||
| 3395 | // Check the memory state. | ||
| 3396 | const KMemoryState test_state = | ||
| 3397 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | | ||
| 3398 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 3399 | size_t num_allocator_blocks; | ||
| 3400 | KMemoryState old_state; | ||
| 3401 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 3402 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 3403 | test_state, perm, perm, | ||
| 3404 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | ||
| 3405 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | ||
| 3406 | |||
| 3407 | // Create an update allocator. | ||
| 3408 | Result allocator_result; | ||
| 3409 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3410 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3411 | R_TRY(allocator_result); | ||
| 3412 | |||
| 3413 | // Update the memory blocks. | ||
| 3414 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 3415 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | ||
| 3416 | |||
| 3417 | // Set whether the locked memory was io. | ||
| 3418 | *out_is_io = | ||
| 3419 | static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io; | ||
| 3420 | |||
| 3421 | R_SUCCEED(); | ||
| 3422 | } | ||
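Every `CheckMemoryState*` call in this file takes (mask, value) pairs with one rule: a field passes when `(field & mask) == value`. So a mask of `IpcLocked | Locked` with value `None` means "neither attribute may be set", while `mask == value` means "all of these flags must be set" (this overload additionally ignores `DeviceShared` via its last parameter). A self-checking sketch of the core predicate with a stand-in flags enum; the bit assignments are illustrative, not the kernel's:

```cpp
#include <cstdint>

enum class Attr : std::uint32_t {
    None = 0,
    IpcLocked = 1u << 0,
    DeviceShared = 1u << 1,
    Locked = 1u << 2,
};
constexpr Attr operator|(Attr a, Attr b) {
    return static_cast<Attr>(static_cast<std::uint32_t>(a) | static_cast<std::uint32_t>(b));
}
constexpr Attr operator&(Attr a, Attr b) {
    return static_cast<Attr>(static_cast<std::uint32_t>(a) & static_cast<std::uint32_t>(b));
}

// The core CheckMemoryState predicate: the masked field must equal the value.
constexpr bool Passes(Attr field, Attr mask, Attr value) {
    return (field & mask) == value;
}

// LockForMapDeviceAddressSpace tolerates DeviceShared but rejects any page
// that is IpcLocked or Locked:
static_assert(Passes(Attr::DeviceShared, Attr::IpcLocked | Attr::Locked, Attr::None));
static_assert(!Passes(Attr::Locked, Attr::IpcLocked | Attr::Locked, Attr::None));

int main() {}
```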
| 3423 | |||
| 3424 | Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, | ||
| 3425 | bool check_heap) { | ||
| 3426 | // Lightly validate the range before doing anything else. | ||
| 3427 | const size_t num_pages = size / PageSize; | ||
| 3428 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3429 | |||
| 3430 | // Lock the table. | ||
| 3431 | KScopedLightLock lk(m_general_lock); | ||
| 3432 | |||
| 3433 | // Check the memory state. | ||
| 3434 | const KMemoryState test_state = | ||
| 3435 | KMemoryState::FlagCanDeviceMap | | ||
| 3436 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 3437 | size_t num_allocator_blocks; | ||
| 3438 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3439 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, | ||
| 3440 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 3441 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3442 | |||
| 3443 | // Create an update allocator. | ||
| 3444 | Result allocator_result; | ||
| 3445 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3446 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3447 | R_TRY(allocator_result); | ||
| 3448 | |||
| 3449 | // Update the memory blocks. | ||
| 3450 | const KMemoryBlockManager::MemoryBlockLockFunction lock_func = | ||
| 3451 | m_enable_device_address_space_merge | ||
| 3452 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare | ||
| 3453 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; | ||
| 3454 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, | ||
| 3455 | KMemoryPermission::None); | ||
| 3456 | |||
| 3457 | R_SUCCEED(); | ||
| 3458 | } | ||
| 3459 | |||
| 3460 | Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { | ||
| 3461 | // Lightly validate the range before doing anything else. | ||
| 3462 | const size_t num_pages = size / PageSize; | ||
| 3463 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3464 | |||
| 3465 | // Lock the table. | ||
| 3466 | KScopedLightLock lk(m_general_lock); | ||
| 3467 | |||
| 3468 | // Check the memory state. | ||
| 3469 | size_t num_allocator_blocks; | ||
| 3470 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3471 | std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, | ||
| 3472 | KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, | ||
| 3473 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3474 | |||
| 3475 | // Create an update allocator. | ||
| 3476 | Result allocator_result; | ||
| 3477 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3478 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 3479 | R_TRY(allocator_result); | ||
| 3480 | |||
| 3481 | // Update the memory blocks. | ||
| 3482 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | ||
| 3483 | &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); | ||
| 3484 | |||
| 3485 | R_SUCCEED(); | ||
| 3486 | } | ||
| 3487 | |||
| 3488 | Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) { | ||
| 3489 | // Lightly validate the range before doing anything else. | ||
| 3490 | const size_t num_pages = size / PageSize; | ||
| 3491 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 3492 | |||
| 3493 | // Lock the table. | ||
| 3494 | KScopedLightLock lk(m_general_lock); | ||
| 3495 | |||
| 3496 | // Check memory state. | ||
| 3497 | size_t allocator_num_blocks = 0; | ||
| 3498 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3499 | std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap, | ||
| 3500 | KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, | ||
| 3501 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3502 | |||
| 3503 | // Create an update allocator for the region. | ||
| 3504 | Result allocator_result; | ||
| 3505 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 3506 | m_memory_block_slab_manager, allocator_num_blocks); | ||
| 3507 | R_TRY(allocator_result); | ||
| 3508 | |||
| 3509 | // Update the memory blocks. | ||
| 3510 | m_memory_block_manager.UpdateLock( | ||
| 3511 | std::addressof(allocator), address, num_pages, | ||
| 3512 | m_enable_device_address_space_merge | ||
| 3513 | ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare | ||
| 3514 | : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, | ||
| 3515 | KMemoryPermission::None); | ||
| 3516 | |||
| 3517 | R_SUCCEED(); | ||
| 3518 | } | ||
| 3519 | |||
| 3520 | Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 3521 | KProcessAddress address, size_t size, | ||
| 3522 | KMemoryPermission perm, | ||
| 3523 | bool is_aligned) { | ||
| 3524 | // Lock the table. | ||
| 3525 | KScopedLightLock lk(m_general_lock); | ||
| 3526 | |||
| 3527 | // Get the range. | ||
| 3528 | const KMemoryState test_state = | ||
| 3529 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | ||
| 3530 | R_TRY(this->GetContiguousMemoryRangeWithState( | ||
| 3531 | out, address, size, test_state, test_state, perm, perm, | ||
| 3532 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None)); | ||
| 3533 | |||
| 3534 | // We got the range, so open it. | ||
| 3535 | out->Open(); | ||
| 3536 | |||
| 3537 | R_SUCCEED(); | ||
| 3538 | } | ||
| 3539 | |||
| 3540 | Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, | ||
| 3541 | KProcessAddress address, | ||
| 3542 | size_t size) { | ||
| 3543 | // Lock the table. | ||
| 3544 | KScopedLightLock lk(m_general_lock); | ||
| 3545 | |||
| 3546 | // Get the range. | ||
| 3547 | R_TRY(this->GetContiguousMemoryRangeWithState( | ||
| 3548 | out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap, | ||
| 3549 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 3550 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | ||
| 3551 | |||
| 3552 | // We got the range, so open it. | ||
| 3553 | out->Open(); | ||
| 3554 | |||
| 3555 | R_SUCCEED(); | ||
| 3556 | } | ||
| 3557 | |||
| 3558 | Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, | ||
| 3559 | size_t size) { | ||
| 3560 | R_RETURN(this->LockMemoryAndOpen( | ||
| 3561 | nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 3562 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, | ||
| 3563 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 3564 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | ||
| 3565 | KMemoryPermission::KernelReadWrite), | ||
| 3566 | KMemoryAttribute::Locked)); | ||
| 3567 | } | ||
| 3568 | |||
| 3569 | Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { | ||
| 3570 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 3571 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, | ||
| 3572 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3573 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 3574 | KMemoryAttribute::Locked, nullptr)); | ||
| 3575 | } | ||
| 3576 | |||
| 3577 | Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 3578 | KMemoryPermission perm) { | ||
| 3579 | R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer, | ||
| 3580 | KMemoryState::FlagCanTransfer, KMemoryPermission::All, | ||
| 3581 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | ||
| 3582 | KMemoryAttribute::None, perm, KMemoryAttribute::Locked)); | ||
| 3583 | } | ||
| 3584 | |||
| 3585 | Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size, | ||
| 3586 | const KPageGroup& pg) { | ||
| 3587 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer, | ||
| 3588 | KMemoryState::FlagCanTransfer, KMemoryPermission::None, | ||
| 3589 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3590 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 3591 | KMemoryAttribute::Locked, std::addressof(pg))); | ||
| 3592 | } | ||
| 3593 | |||
| 3594 | Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) { | ||
| 3595 | R_RETURN(this->LockMemoryAndOpen( | ||
| 3596 | out, nullptr, address, size, KMemoryState::FlagCanCodeMemory, | ||
| 3597 | KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite, | ||
| 3598 | KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 3599 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | ||
| 3600 | KMemoryPermission::KernelReadWrite), | ||
| 3601 | KMemoryAttribute::Locked)); | ||
| 3602 | } | ||
| 3603 | |||
| 3604 | Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size, | ||
| 3605 | const KPageGroup& pg) { | ||
| 3606 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory, | ||
| 3607 | KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, | ||
| 3608 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 3609 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 3610 | KMemoryAttribute::Locked, std::addressof(pg))); | ||
| 3611 | } | ||
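The `LockFor*`/`UnlockFor*` pairs above (IPC user buffers, transfer memory, code memory) all drive one protocol: locking requires the relevant `FlagCan*` state and a clean attribute set, then stamps `KMemoryAttribute::Locked` and drops the user mapping to `NotMapped | KernelReadWrite`; unlocking requires `Locked` and restores `UserReadWrite`. A toy model of that two-state transition (the names and states are illustrative, not the kernel's types):

```cpp
#include <cstdint>

enum class Perm : std::uint8_t { UserReadWrite, KernelOnly };

struct PageState {
    Perm perm = Perm::UserReadWrite;
    bool locked = false;
};

// Locking: reject if already locked or not user-mapped, then hide the page
// from userspace (NotMapped | KernelReadWrite in the real table) and mark it.
bool Lock(PageState& s) {
    if (s.locked || s.perm != Perm::UserReadWrite) {
        return false;
    }
    s.perm = Perm::KernelOnly;
    s.locked = true;
    return true;
}

// Unlocking: the attribute check, i.e. the page must currently be Locked.
bool Unlock(PageState& s) {
    if (!s.locked) {
        return false;
    }
    s.perm = Perm::UserReadWrite;
    s.locked = false;
    return true;
}

int main() {
    PageState page;
    bool ok = Lock(page);
    ok = ok && !Lock(page);   // double-lock is rejected
    ok = ok && Unlock(page);
    ok = ok && !Unlock(page); // double-unlock is rejected
    return ok ? 0 : 1;
}
```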
| 3612 | |||
| 3613 | Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, | ||
| 3614 | KProcessAddress address, | ||
| 3615 | size_t size) { | ||
| 3616 | // Lock the table. | ||
| 3617 | KScopedLightLock lk(m_general_lock); | ||
| 3618 | |||
| 3619 | // Get the range. | ||
| 3620 | R_TRY(this->GetContiguousMemoryRangeWithState( | ||
| 3621 | out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted, | ||
| 3622 | KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached, | ||
| 3623 | KMemoryAttribute::None)); | ||
| 3624 | |||
| 3625 | // We got the range, so open it. | ||
| 3626 | out->Open(); | ||
| 3627 | |||
| 3628 | R_SUCCEED(); | ||
| 3629 | } | ||
| 3630 | |||
| 3631 | Result KPageTableBase::CopyMemoryFromLinearToUser( | ||
| 3632 | KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 3633 | KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask, | ||
| 3634 | KMemoryAttribute src_attr) { | ||
| 3635 | // Lightly validate the range before doing anything else. | ||
| 3636 | R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 3637 | |||
| 3638 | // Get the destination memory reference. | ||
| 3639 | auto& dst_memory = GetCurrentMemory(m_kernel); | ||
| 3640 | |||
| 3641 | // Copy the memory. | ||
| 3642 | { | ||
| 3643 | // Lock the table. | ||
| 3644 | KScopedLightLock lk(m_general_lock); | ||
| 3645 | |||
| 3646 | // Check memory state. | ||
| 3647 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3648 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 3649 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 3650 | |||
| 3651 | auto& impl = this->GetImpl(); | ||
| 3652 | |||
| 3653 | // Begin traversal. | ||
| 3654 | TraversalContext context; | ||
| 3655 | TraversalEntry next_entry; | ||
| 3656 | bool traverse_valid = | ||
| 3657 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); | ||
| 3658 | ASSERT(traverse_valid); | ||
| 3659 | |||
| 3660 | // Prepare tracking variables. | ||
| 3661 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3662 | size_t cur_size = | ||
| 3663 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3664 | size_t tot_size = cur_size; | ||
| 3665 | |||
| 3666 | auto PerformCopy = [&]() -> Result { | ||
| 3667 | // Ensure the address is linear mapped. | ||
| 3668 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3669 | |||
| 3670 | // Copy as much aligned data as we can. | ||
| 3671 | if (cur_size >= sizeof(u32)) { | ||
| 3672 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3673 | R_UNLESS(dst_memory.WriteBlock(dst_addr, | ||
| 3674 | GetLinearMappedVirtualPointer(m_kernel, cur_addr), | ||
| 3675 | copy_size), | ||
| 3676 | ResultInvalidCurrentMemory); | ||
| 3677 | |||
| 3678 | dst_addr += copy_size; | ||
| 3679 | cur_addr += copy_size; | ||
| 3680 | cur_size -= copy_size; | ||
| 3681 | } | ||
| 3682 | |||
| 3683 | // Copy remaining data. | ||
| 3684 | if (cur_size > 0) { | ||
| 3685 | R_UNLESS(dst_memory.WriteBlock( | ||
| 3686 | dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size), | ||
| 3687 | ResultInvalidCurrentMemory); | ||
| 3688 | } | ||
| 3689 | |||
| 3690 | R_SUCCEED(); | ||
| 3691 | }; | ||
| 3692 | |||
| 3693 | // Iterate. | ||
| 3694 | while (tot_size < size) { | ||
| 3695 | // Continue the traversal. | ||
| 3696 | traverse_valid = | ||
| 3697 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3698 | ASSERT(traverse_valid); | ||
| 3699 | |||
| 3700 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3701 | // Perform copy. | ||
| 3702 | R_TRY(PerformCopy()); | ||
| 3703 | |||
| 3704 | // Advance. | ||
| 3705 | dst_addr += cur_size; | ||
| 3706 | |||
| 3707 | cur_addr = next_entry.phys_addr; | ||
| 3708 | cur_size = next_entry.block_size; | ||
| 3709 | } else { | ||
| 3710 | cur_size += next_entry.block_size; | ||
| 3711 | } | ||
| 3712 | |||
| 3713 | tot_size += next_entry.block_size; | ||
| 3714 | } | ||
| 3715 | |||
| 3716 | // Ensure we use the right size for the last block. | ||
| 3717 | if (tot_size > size) { | ||
| 3718 | cur_size -= (tot_size - size); | ||
| 3719 | } | ||
| 3720 | |||
| 3721 | // Perform copy for the last block. | ||
| 3722 | R_TRY(PerformCopy()); | ||
| 3723 | } | ||
| 3724 | |||
| 3725 | R_SUCCEED(); | ||
| 3726 | } | ||
| 3727 | |||
| 3728 | Result KPageTableBase::CopyMemoryFromLinearToKernel( | ||
| 3729 | void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 3730 | KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask, | ||
| 3731 | KMemoryAttribute src_attr) { | ||
| 3732 | // Lightly validate the range before doing anything else. | ||
| 3733 | R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 3734 | |||
| 3735 | // Copy the memory. | ||
| 3736 | { | ||
| 3737 | // Lock the table. | ||
| 3738 | KScopedLightLock lk(m_general_lock); | ||
| 3739 | |||
| 3740 | // Check memory state. | ||
| 3741 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3742 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 3743 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 3744 | |||
| 3745 | auto& impl = this->GetImpl(); | ||
| 3746 | |||
| 3747 | // Begin traversal. | ||
| 3748 | TraversalContext context; | ||
| 3749 | TraversalEntry next_entry; | ||
| 3750 | bool traverse_valid = | ||
| 3751 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); | ||
| 3752 | ASSERT(traverse_valid); | ||
| 3753 | |||
| 3754 | // Prepare tracking variables. | ||
| 3755 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3756 | size_t cur_size = | ||
| 3757 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3758 | size_t tot_size = cur_size; | ||
| 3759 | |||
| 3760 | auto PerformCopy = [&]() -> Result { | ||
| 3761 | // Ensure the address is linear mapped. | ||
| 3762 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3763 | |||
| 3764 | // Copy the data. | ||
| 3765 | std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size); | ||
| 3766 | |||
| 3767 | R_SUCCEED(); | ||
| 3768 | }; | ||
| 3769 | |||
| 3770 | // Iterate. | ||
| 3771 | while (tot_size < size) { | ||
| 3772 | // Continue the traversal. | ||
| 3773 | traverse_valid = | ||
| 3774 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3775 | ASSERT(traverse_valid); | ||
| 3776 | |||
| 3777 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3778 | // Perform copy. | ||
| 3779 | R_TRY(PerformCopy()); | ||
| 3780 | |||
| 3781 | // Advance. | ||
| 3782 | buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size); | ||
| 3783 | |||
| 3784 | cur_addr = next_entry.phys_addr; | ||
| 3785 | cur_size = next_entry.block_size; | ||
| 3786 | } else { | ||
| 3787 | cur_size += next_entry.block_size; | ||
| 3788 | } | ||
| 3789 | |||
| 3790 | tot_size += next_entry.block_size; | ||
| 3791 | } | ||
| 3792 | |||
| 3793 | // Ensure we use the right size for the last block. | ||
| 3794 | if (tot_size > size) { | ||
| 3795 | cur_size -= (tot_size - size); | ||
| 3796 | } | ||
| 3797 | |||
| 3798 | // Perform copy for the last block. | ||
| 3799 | R_TRY(PerformCopy()); | ||
| 3800 | } | ||
| 3801 | |||
| 3802 | R_SUCCEED(); | ||
| 3803 | } | ||
| 3804 | |||
| 3805 | Result KPageTableBase::CopyMemoryFromUserToLinear( | ||
| 3806 | KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 3807 | KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 3808 | KProcessAddress src_addr) { | ||
| 3809 | // Lightly validate the range before doing anything else. | ||
| 3810 | R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 3811 | |||
| 3812 | // Get the source memory reference. | ||
| 3813 | auto& src_memory = GetCurrentMemory(m_kernel); | ||
| 3814 | |||
| 3815 | // Copy the memory. | ||
| 3816 | { | ||
| 3817 | // Lock the table. | ||
| 3818 | KScopedLightLock lk(m_general_lock); | ||
| 3819 | |||
| 3820 | // Check memory state. | ||
| 3821 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3822 | dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, | ||
| 3823 | dst_attr_mask | KMemoryAttribute::Uncached, dst_attr)); | ||
| 3824 | |||
| 3825 | auto& impl = this->GetImpl(); | ||
| 3826 | |||
| 3827 | // Begin traversal. | ||
| 3828 | TraversalContext context; | ||
| 3829 | TraversalEntry next_entry; | ||
| 3830 | bool traverse_valid = | ||
| 3831 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr); | ||
| 3832 | ASSERT(traverse_valid); | ||
| 3833 | |||
| 3834 | // Prepare tracking variables. | ||
| 3835 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3836 | size_t cur_size = | ||
| 3837 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3838 | size_t tot_size = cur_size; | ||
| 3839 | |||
| 3840 | auto PerformCopy = [&]() -> Result { | ||
| 3841 | // Ensure the address is linear mapped. | ||
| 3842 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3843 | |||
| 3844 | // Copy as much aligned data as we can. | ||
| 3845 | if (cur_size >= sizeof(u32)) { | ||
| 3846 | const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32)); | ||
| 3847 | R_UNLESS(src_memory.ReadBlock(src_addr, | ||
| 3848 | GetLinearMappedVirtualPointer(m_kernel, cur_addr), | ||
| 3849 | copy_size), | ||
| 3850 | ResultInvalidCurrentMemory); | ||
| 3851 | src_addr += copy_size; | ||
| 3852 | cur_addr += copy_size; | ||
| 3853 | cur_size -= copy_size; | ||
| 3854 | } | ||
| 3855 | |||
| 3856 | // Copy remaining data. | ||
| 3857 | if (cur_size > 0) { | ||
| 3858 | R_UNLESS(src_memory.ReadBlock( | ||
| 3859 | src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size), | ||
| 3860 | ResultInvalidCurrentMemory); | ||
| 3861 | } | ||
| 3862 | |||
| 3863 | R_SUCCEED(); | ||
| 3864 | }; | ||
| 3865 | |||
| 3866 | // Iterate. | ||
| 3867 | while (tot_size < size) { | ||
| 3868 | // Continue the traversal. | ||
| 3869 | traverse_valid = | ||
| 3870 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3871 | ASSERT(traverse_valid); | ||
| 3872 | |||
| 3873 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3874 | // Perform copy. | ||
| 3875 | R_TRY(PerformCopy()); | ||
| 3876 | |||
| 3877 | // Advance. | ||
| 3878 | src_addr += cur_size; | ||
| 3879 | |||
| 3880 | cur_addr = next_entry.phys_addr; | ||
| 3881 | cur_size = next_entry.block_size; | ||
| 3882 | } else { | ||
| 3883 | cur_size += next_entry.block_size; | ||
| 3884 | } | ||
| 3885 | |||
| 3886 | tot_size += next_entry.block_size; | ||
| 3887 | } | ||
| 3888 | |||
| 3889 | // Ensure we use the right size for the last block. | ||
| 3890 | if (tot_size > size) { | ||
| 3891 | cur_size -= (tot_size - size); | ||
| 3892 | } | ||
| 3893 | |||
| 3894 | // Perform copy for the last block. | ||
| 3895 | R_TRY(PerformCopy()); | ||
| 3896 | } | ||
| 3897 | |||
| 3898 | R_SUCCEED(); | ||
| 3899 | } | ||
| 3900 | |||
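The PerformCopy lambda above splits each run into a u32-aligned bulk portion followed by a byte-wise tail. A sketch of the split, assuming a plain memcpy in place of the real ReadBlock guest-memory accessor:

    // aligned_copy_split_sketch.cpp: bulk-then-tail split as used by the
    // PerformCopy lambda above; memcpy is assumed in place of ReadBlock.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopySplit(std::uint8_t* dst, const std::uint8_t* src, std::size_t size) {
        if (size >= sizeof(std::uint32_t)) {
            const std::size_t bulk = size - (size % sizeof(std::uint32_t));
            std::memcpy(dst, src, bulk); // aligned bulk, a multiple of 4 bytes
            dst += bulk;
            src += bulk;
            size -= bulk;
        }
        if (size > 0) {
            std::memcpy(dst, src, size); // remaining 1-3 bytes
        }
    }

    int main() {
        std::uint8_t src[7] = {1, 2, 3, 4, 5, 6, 7};
        std::uint8_t dst[7] = {};
        CopySplit(dst, src, sizeof(src)); // 4-byte bulk plus 3-byte tail
        return 0;
    }

In the real routine each sub-copy can fail, which is why both halves go through ReadBlock and R_UNLESS rather than a bare memcpy.
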
| 3901 | Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, | ||
| 3902 | KMemoryState dst_state_mask, | ||
| 3903 | KMemoryState dst_state, | ||
| 3904 | KMemoryPermission dst_test_perm, | ||
| 3905 | KMemoryAttribute dst_attr_mask, | ||
| 3906 | KMemoryAttribute dst_attr, void* buffer) { | ||
| 3907 | // Lightly validate the range before doing anything else. | ||
| 3908 | R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 3909 | |||
| 3910 | // Copy the memory. | ||
| 3911 | { | ||
| 3912 | // Lock the table. | ||
| 3913 | KScopedLightLock lk(m_general_lock); | ||
| 3914 | |||
| 3915 | // Check memory state. | ||
| 3916 | R_TRY(this->CheckMemoryStateContiguous( | ||
| 3917 | dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, | ||
| 3918 | dst_attr_mask | KMemoryAttribute::Uncached, dst_attr)); | ||
| 3919 | |||
| 3920 | auto& impl = this->GetImpl(); | ||
| 3921 | |||
| 3922 | // Begin traversal. | ||
| 3923 | TraversalContext context; | ||
| 3924 | TraversalEntry next_entry; | ||
| 3925 | bool traverse_valid = | ||
| 3926 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr); | ||
| 3927 | ASSERT(traverse_valid); | ||
| 3928 | |||
| 3929 | // Prepare tracking variables. | ||
| 3930 | KPhysicalAddress cur_addr = next_entry.phys_addr; | ||
| 3931 | size_t cur_size = | ||
| 3932 | next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); | ||
| 3933 | size_t tot_size = cur_size; | ||
| 3934 | |||
| 3935 | auto PerformCopy = [&]() -> Result { | ||
| 3936 | // Ensure the address is linear mapped. | ||
| 3937 | R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory); | ||
| 3938 | |||
| 3939 | // Copy the data. | ||
| 3940 | std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size); | ||
| 3941 | |||
| 3942 | R_SUCCEED(); | ||
| 3943 | }; | ||
| 3944 | |||
| 3945 | // Iterate. | ||
| 3946 | while (tot_size < size) { | ||
| 3947 | // Continue the traversal. | ||
| 3948 | traverse_valid = | ||
| 3949 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 3950 | ASSERT(traverse_valid); | ||
| 3951 | |||
| 3952 | if (next_entry.phys_addr != (cur_addr + cur_size)) { | ||
| 3953 | // Perform copy. | ||
| 3954 | R_TRY(PerformCopy()); | ||
| 3955 | |||
| 3956 | // Advance. | ||
| 3957 | buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size); | ||
| 3958 | |||
| 3959 | cur_addr = next_entry.phys_addr; | ||
| 3960 | cur_size = next_entry.block_size; | ||
| 3961 | } else { | ||
| 3962 | cur_size += next_entry.block_size; | ||
| 3963 | } | ||
| 3964 | |||
| 3965 | tot_size += next_entry.block_size; | ||
| 3966 | } | ||
| 3967 | |||
| 3968 | // Ensure we use the right size for the last block. | ||
| 3969 | if (tot_size > size) { | ||
| 3970 | cur_size -= (tot_size - size); | ||
| 3971 | } | ||
| 3972 | |||
| 3973 | // Perform copy for the last block. | ||
| 3974 | R_TRY(PerformCopy()); | ||
| 3975 | } | ||
| 3976 | |||
| 3977 | R_SUCCEED(); | ||
| 3978 | } | ||
| 3979 | |||
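All of these loops end with the same adjustment: tot_size accumulates whole traversal blocks, so the final block can overshoot the requested size and must be trimmed before the last copy. A tiny worked example with assumed sizes:

    // last_block_trim_sketch.cpp: the "tot_size > size" adjustment shared by
    // the copy loops above, shown with assumed block sizes.
    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t size = 0x2800;   // bytes the caller asked for
        std::size_t tot_size = 0x2000;     // bytes accounted before the last block
        std::size_t cur_size = 0x1000;     // size of the last traversed block
        tot_size += cur_size;              // 0x3000: overshoots the request
        if (tot_size > size) {
            cur_size -= (tot_size - size); // trim the final copy to 0x800 bytes
        }
        assert(cur_size == 0x800);
        return 0;
    }
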
| 3980 | Result KPageTableBase::CopyMemoryFromHeapToHeap( | ||
| 3981 | KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 3982 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 3983 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 3984 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 3985 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 3986 | // For convenience, alias this. | ||
| 3987 | KPageTableBase& src_page_table = *this; | ||
| 3988 | |||
| 3989 | // Lightly validate the ranges before doing anything else. | ||
| 3990 | R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 3991 | R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 3992 | |||
| 3993 | // Copy the memory. | ||
| 3994 | { | ||
| 3995 | // Acquire the table locks. | ||
| 3996 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 3997 | |||
| 3998 | // Check memory state. | ||
| 3999 | R_TRY(src_page_table.CheckMemoryStateContiguous( | ||
| 4000 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 4001 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 4002 | R_TRY(dst_page_table.CheckMemoryStateContiguous( | ||
| 4003 | dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, | ||
| 4004 | dst_attr_mask | KMemoryAttribute::Uncached, dst_attr)); | ||
| 4005 | |||
| 4006 | // Get implementations. | ||
| 4007 | auto& src_impl = src_page_table.GetImpl(); | ||
| 4008 | auto& dst_impl = dst_page_table.GetImpl(); | ||
| 4009 | |||
| 4010 | // Prepare for traversal. | ||
| 4011 | TraversalContext src_context; | ||
| 4012 | TraversalContext dst_context; | ||
| 4013 | TraversalEntry src_next_entry; | ||
| 4014 | TraversalEntry dst_next_entry; | ||
| 4015 | bool traverse_valid; | ||
| 4016 | |||
| 4017 | // Begin traversal. | ||
| 4018 | traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), | ||
| 4019 | std::addressof(src_context), src_addr); | ||
| 4020 | ASSERT(traverse_valid); | ||
| 4021 | traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), | ||
| 4022 | std::addressof(dst_context), dst_addr); | ||
| 4023 | ASSERT(traverse_valid); | ||
| 4024 | |||
| 4025 | // Prepare tracking variables. | ||
| 4026 | KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4027 | KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4028 | size_t cur_src_size = src_next_entry.block_size - | ||
| 4029 | (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); | ||
| 4030 | size_t cur_dst_size = dst_next_entry.block_size - | ||
| 4031 | (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); | ||
| 4032 | |||
| 4033 | // Adjust the initial block sizes. | ||
| 4034 | src_next_entry.block_size = cur_src_size; | ||
| 4035 | dst_next_entry.block_size = cur_dst_size; | ||
| 4036 | |||
| 4037 | // Before we get any crazier, succeed if there's nothing to do. | ||
| 4038 | R_SUCCEED_IF(size == 0); | ||
| 4039 | |||
| 4040 | // We're going to manage dual traversal via an offset against the total size. | ||
| 4041 | KPhysicalAddress cur_src_addr = cur_src_block_addr; | ||
| 4042 | KPhysicalAddress cur_dst_addr = cur_dst_block_addr; | ||
| 4043 | size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size); | ||
| 4044 | |||
| 4045 | // Iterate. | ||
| 4046 | size_t ofs = 0; | ||
| 4047 | while (ofs < size) { | ||
| 4048 | // Determine how much we can copy this iteration. | ||
| 4049 | const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs); | ||
| 4050 | |||
| 4051 | // If we need to advance the traversals, do so. | ||
| 4052 | bool updated_src = false, updated_dst = false, skip_copy = false; | ||
| 4053 | if (ofs + cur_copy_size != size) { | ||
| 4054 | if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) { | ||
| 4055 | // Continue the src traversal. | ||
| 4056 | traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), | ||
| 4057 | std::addressof(src_context)); | ||
| 4058 | ASSERT(traverse_valid); | ||
| 4059 | |||
| 4060 | // Update source. | ||
| 4061 | updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr; | ||
| 4062 | } | ||
| 4063 | |||
| 4064 | if (cur_dst_addr + cur_min_size == | ||
| 4065 | dst_next_entry.phys_addr + dst_next_entry.block_size) { | ||
| 4066 | // Continue the dst traversal. | ||
| 4067 | traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), | ||
| 4068 | std::addressof(dst_context)); | ||
| 4069 | ASSERT(traverse_valid); | ||
| 4070 | |||
| 4071 | // Update destination. | ||
| 4072 | updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr; | ||
| 4073 | } | ||
| 4074 | |||
| 4075 |                 // If we updated neither the source nor the destination, skip the copy this iteration. | ||
| 4076 | if (!updated_src && !updated_dst) { | ||
| 4077 | skip_copy = true; | ||
| 4078 | |||
| 4079 | // Update the source block address. | ||
| 4080 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4081 | } | ||
| 4082 | } | ||
| 4083 | |||
| 4084 | // Do the copy, unless we're skipping it. | ||
| 4085 | if (!skip_copy) { | ||
| 4086 | // We need both ends of the copy to be heap blocks. | ||
| 4087 | R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory); | ||
| 4088 | R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory); | ||
| 4089 | |||
| 4090 | // Copy the data. | ||
| 4091 | std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr), | ||
| 4092 | GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size); | ||
| 4093 | |||
| 4094 | // Update. | ||
| 4095 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4096 | cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size; | ||
| 4097 | cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4098 | cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size; | ||
| 4099 | |||
| 4100 | // Advance offset. | ||
| 4101 | ofs += cur_copy_size; | ||
| 4102 | } | ||
| 4103 | |||
| 4104 | // Update min size. | ||
| 4105 | cur_src_size = src_next_entry.block_size; | ||
| 4106 | cur_dst_size = dst_next_entry.block_size; | ||
| 4107 | cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, | ||
| 4108 | cur_dst_block_addr - cur_dst_addr + cur_dst_size); | ||
| 4109 | } | ||
| 4110 | } | ||
| 4111 | |||
| 4112 | R_SUCCEED(); | ||
| 4113 | } | ||
| 4114 | |||
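The heap-to-heap path is harder because the source and destination traversals advance independently, so each iteration copies at most the smaller of the bytes remaining in the current source block and in the current destination block. The sketch below models that lockstep with plain indices; it deliberately omits the contiguous-block merging and skip-copy bookkeeping of the real loop:

    // dual_cursor_sketch.cpp: simplified model of the heap-to-heap loop above,
    // with assumed block sizes on each side.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Run {
        std::size_t size;
    };

    int main() {
        const std::vector<Run> src{{0x3000}, {0x1000}};
        const std::vector<Run> dst{{0x1000}, {0x3000}};
        const std::size_t size = 0x4000;

        std::size_t si = 0, di = 0;       // current block on each side
        std::size_t s_off = 0, d_off = 0; // offset within each block
        std::size_t ofs = 0;
        while (ofs < size) {
            // Copy at most what remains in both current blocks.
            const std::size_t s_left = src[si].size - s_off;
            const std::size_t d_left = dst[di].size - d_off;
            const std::size_t chunk = std::min({s_left, d_left, size - ofs});
            std::printf("copy %#zx bytes (src block %zu, dst block %zu)\n", chunk, si, di);
            s_off += chunk;
            d_off += chunk;
            ofs += chunk;
            // Advance whichever traversal ran out of its current block.
            if (s_off == src[si].size && ofs < size) {
                ++si;
                s_off = 0;
            }
            if (d_off == dst[di].size && ofs < size) {
                ++di;
                d_off = 0;
            }
        }
        return 0;
    }
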
| 4115 | Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 4116 | KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 4117 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 4118 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 4119 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 4120 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 4121 | // For convenience, alias this. | ||
| 4122 | KPageTableBase& src_page_table = *this; | ||
| 4123 | |||
| 4124 | // Lightly validate the ranges before doing anything else. | ||
| 4125 | R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory); | ||
| 4126 | R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory); | ||
| 4127 | |||
| 4128 | // Copy the memory. | ||
| 4129 | { | ||
| 4130 | // Acquire the table locks. | ||
| 4131 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 4132 | |||
| 4133 | // Check memory state for source. | ||
| 4134 | R_TRY(src_page_table.CheckMemoryStateContiguous( | ||
| 4135 | src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, | ||
| 4136 | src_attr_mask | KMemoryAttribute::Uncached, src_attr)); | ||
| 4137 | |||
| 4138 | // Destination state is intentionally unchecked. | ||
| 4139 | |||
| 4140 | // Get implementations. | ||
| 4141 | auto& src_impl = src_page_table.GetImpl(); | ||
| 4142 | auto& dst_impl = dst_page_table.GetImpl(); | ||
| 4143 | |||
| 4144 | // Prepare for traversal. | ||
| 4145 | TraversalContext src_context; | ||
| 4146 | TraversalContext dst_context; | ||
| 4147 | TraversalEntry src_next_entry; | ||
| 4148 | TraversalEntry dst_next_entry; | ||
| 4149 | bool traverse_valid; | ||
| 4150 | |||
| 4151 | // Begin traversal. | ||
| 4152 | traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), | ||
| 4153 | std::addressof(src_context), src_addr); | ||
| 4154 | ASSERT(traverse_valid); | ||
| 4155 | traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), | ||
| 4156 | std::addressof(dst_context), dst_addr); | ||
| 4157 | ASSERT(traverse_valid); | ||
| 4158 | |||
| 4159 | // Prepare tracking variables. | ||
| 4160 | KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4161 | KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4162 | size_t cur_src_size = src_next_entry.block_size - | ||
| 4163 | (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); | ||
| 4164 | size_t cur_dst_size = dst_next_entry.block_size - | ||
| 4165 | (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); | ||
| 4166 | |||
| 4167 | // Adjust the initial block sizes. | ||
| 4168 | src_next_entry.block_size = cur_src_size; | ||
| 4169 | dst_next_entry.block_size = cur_dst_size; | ||
| 4170 | |||
| 4171 | // Before we get any crazier, succeed if there's nothing to do. | ||
| 4172 | R_SUCCEED_IF(size == 0); | ||
| 4173 | |||
| 4174 | // We're going to manage dual traversal via an offset against the total size. | ||
| 4175 | KPhysicalAddress cur_src_addr = cur_src_block_addr; | ||
| 4176 | KPhysicalAddress cur_dst_addr = cur_dst_block_addr; | ||
| 4177 | size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size); | ||
| 4178 | |||
| 4179 | // Iterate. | ||
| 4180 | size_t ofs = 0; | ||
| 4181 | while (ofs < size) { | ||
| 4182 | // Determine how much we can copy this iteration. | ||
| 4183 | const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs); | ||
| 4184 | |||
| 4185 | // If we need to advance the traversals, do so. | ||
| 4186 | bool updated_src = false, updated_dst = false, skip_copy = false; | ||
| 4187 | if (ofs + cur_copy_size != size) { | ||
| 4188 | if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) { | ||
| 4189 | // Continue the src traversal. | ||
| 4190 | traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), | ||
| 4191 | std::addressof(src_context)); | ||
| 4192 | ASSERT(traverse_valid); | ||
| 4193 | |||
| 4194 | // Update source. | ||
| 4195 | updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr; | ||
| 4196 | } | ||
| 4197 | |||
| 4198 | if (cur_dst_addr + cur_min_size == | ||
| 4199 | dst_next_entry.phys_addr + dst_next_entry.block_size) { | ||
| 4200 | // Continue the dst traversal. | ||
| 4201 | traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), | ||
| 4202 | std::addressof(dst_context)); | ||
| 4203 | ASSERT(traverse_valid); | ||
| 4204 | |||
| 4205 | // Update destination. | ||
| 4206 | updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr; | ||
| 4207 | } | ||
| 4208 | |||
| 4209 |                 // If we updated neither the source nor the destination, skip the copy this iteration. | ||
| 4210 | if (!updated_src && !updated_dst) { | ||
| 4211 | skip_copy = true; | ||
| 4212 | |||
| 4213 | // Update the source block address. | ||
| 4214 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4215 | } | ||
| 4216 | } | ||
| 4217 | |||
| 4218 | // Do the copy, unless we're skipping it. | ||
| 4219 | if (!skip_copy) { | ||
| 4220 | // We need both ends of the copy to be heap blocks. | ||
| 4221 | R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory); | ||
| 4222 | R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory); | ||
| 4223 | |||
| 4224 | // Copy the data. | ||
| 4225 | std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr), | ||
| 4226 | GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size); | ||
| 4227 | |||
| 4228 | // Update. | ||
| 4229 | cur_src_block_addr = src_next_entry.phys_addr; | ||
| 4230 | cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size; | ||
| 4231 | cur_dst_block_addr = dst_next_entry.phys_addr; | ||
| 4232 | cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size; | ||
| 4233 | |||
| 4234 | // Advance offset. | ||
| 4235 | ofs += cur_copy_size; | ||
| 4236 | } | ||
| 4237 | |||
| 4238 | // Update min size. | ||
| 4239 | cur_src_size = src_next_entry.block_size; | ||
| 4240 | cur_dst_size = dst_next_entry.block_size; | ||
| 4241 | cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, | ||
| 4242 | cur_dst_block_addr - cur_dst_addr + cur_dst_size); | ||
| 4243 | } | ||
| 4244 | } | ||
| 4245 | |||
| 4246 | R_SUCCEED(); | ||
| 4247 | } | ||
| 4248 | |||
| 4249 | Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 4250 | KProcessAddress address, size_t size, | ||
| 4251 | KMemoryPermission test_perm, KMemoryState dst_state) { | ||
| 4252 | // Validate pre-conditions. | ||
| 4253 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 4254 | ASSERT(test_perm == KMemoryPermission::UserReadWrite || | ||
| 4255 | test_perm == KMemoryPermission::UserRead); | ||
| 4256 | |||
| 4257 | // Check that the address is in range. | ||
| 4258 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 4259 | |||
| 4260 | // Get the source permission. | ||
| 4261 | const auto src_perm = static_cast<KMemoryPermission>( | ||
| 4262 | (test_perm == KMemoryPermission::UserReadWrite) | ||
| 4263 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 4264 | : KMemoryPermission::UserRead); | ||
| 4265 | |||
| 4266 | // Get aligned extents. | ||
| 4267 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 4268 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 4269 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 4270 | const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 4271 | |||
| 4272 | const auto aligned_src_last = GetInteger(aligned_src_end) - 1; | ||
| 4273 | const auto mapping_src_last = GetInteger(mapping_src_end) - 1; | ||
| 4274 | |||
| 4275 | // Get the test state and attribute mask. | ||
| 4276 | KMemoryState test_state; | ||
| 4277 | KMemoryAttribute test_attr_mask; | ||
| 4278 | switch (dst_state) { | ||
| 4279 | case KMemoryState::Ipc: | ||
| 4280 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 4281 | test_attr_mask = | ||
| 4282 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 4283 | break; | ||
| 4284 | case KMemoryState::NonSecureIpc: | ||
| 4285 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 4286 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4287 | break; | ||
| 4288 | case KMemoryState::NonDeviceIpc: | ||
| 4289 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 4290 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4291 | break; | ||
| 4292 | default: | ||
| 4293 | R_THROW(ResultInvalidCombination); | ||
| 4294 | } | ||
| 4295 | |||
| 4296 | // Ensure that on failure, we roll back appropriately. | ||
| 4297 | size_t mapped_size = 0; | ||
| 4298 | ON_RESULT_FAILURE { | ||
| 4299 | if (mapped_size > 0) { | ||
| 4300 | this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, | ||
| 4301 | src_perm); | ||
| 4302 | } | ||
| 4303 | }; | ||
| 4304 | |||
| 4305 | size_t blocks_needed = 0; | ||
| 4306 | |||
| 4307 | // Iterate, mapping as needed. | ||
| 4308 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); | ||
| 4309 | while (true) { | ||
| 4310 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 4311 | |||
| 4312 | // Validate the current block. | ||
| 4313 | R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, | ||
| 4314 | test_attr_mask, KMemoryAttribute::None)); | ||
| 4315 | |||
| 4316 | if (mapping_src_start < mapping_src_end && | ||
| 4317 | GetInteger(mapping_src_start) < info.GetEndAddress() && | ||
| 4318 | info.GetAddress() < GetInteger(mapping_src_end)) { | ||
| 4319 | const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) | ||
| 4320 | ? info.GetAddress() | ||
| 4321 | : GetInteger(mapping_src_start); | ||
| 4322 | const auto cur_end = mapping_src_last >= info.GetLastAddress() | ||
| 4323 | ? info.GetEndAddress() | ||
| 4324 | : GetInteger(mapping_src_end); | ||
| 4325 | const size_t cur_size = cur_end - cur_start; | ||
| 4326 | |||
| 4327 | if (info.GetAddress() < GetInteger(mapping_src_start)) { | ||
| 4328 | ++blocks_needed; | ||
| 4329 | } | ||
| 4330 | if (mapping_src_last < info.GetLastAddress()) { | ||
| 4331 | ++blocks_needed; | ||
| 4332 | } | ||
| 4333 | |||
| 4334 | // Set the permissions on the block, if we need to. | ||
| 4335 | if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { | ||
| 4336 | const DisableMergeAttribute head_body_attr = | ||
| 4337 | (GetInteger(mapping_src_start) >= info.GetAddress()) | ||
| 4338 | ? DisableMergeAttribute::DisableHeadAndBody | ||
| 4339 | : DisableMergeAttribute::None; | ||
| 4340 | const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) | ||
| 4341 | ? DisableMergeAttribute::DisableTail | ||
| 4342 | : DisableMergeAttribute::None; | ||
| 4343 | const KPageProperties properties = { | ||
| 4344 | src_perm, false, false, | ||
| 4345 | static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)}; | ||
| 4346 | R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties, | ||
| 4347 | OperationType::ChangePermissions, false)); | ||
| 4348 | } | ||
| 4349 | |||
| 4350 | // Note that we mapped this part. | ||
| 4351 | mapped_size += cur_size; | ||
| 4352 | } | ||
| 4353 | |||
| 4354 | // If the block is at the end, we're done. | ||
| 4355 | if (aligned_src_last <= info.GetLastAddress()) { | ||
| 4356 | break; | ||
| 4357 | } | ||
| 4358 | |||
| 4359 | // Advance. | ||
| 4360 | ++it; | ||
| 4361 | ASSERT(it != m_memory_block_manager.end()); | ||
| 4362 | } | ||
| 4363 | |||
| 4364 | if (out_blocks_needed != nullptr) { | ||
| 4365 | ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 4366 | *out_blocks_needed = blocks_needed; | ||
| 4367 | } | ||
| 4368 | |||
| 4369 | R_SUCCEED(); | ||
| 4370 | } | ||
| 4371 | |||
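SetupForIpcClient works with two pairs of extents: the aligned extents cover every page the buffer touches, while the mapping extents cover only the pages it fully owns, and the difference between the two is the partial pages. A worked example with assumed values (PageSize of 0x1000, power-of-two alignment):

    // ipc_extents_sketch.cpp: the four extents SetupForIpcClient computes for
    // an unaligned buffer; address, size, and PageSize are assumed values.
    #include <cstdint>

    constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
        return v & ~(a - 1); // 'a' must be a power of two
    }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
        return AlignDown(v + a - 1, a);
    }

    int main() {
        constexpr std::uint64_t PageSize = 0x1000;
        constexpr std::uint64_t address = 0x10800; // unaligned start
        constexpr std::uint64_t size = 0x2000;     // ends at 0x12800, unaligned

        // Aligned extents: every page the buffer touches.
        constexpr std::uint64_t aligned_start = AlignDown(address, PageSize);
        constexpr std::uint64_t aligned_end = AlignUp(address + size, PageSize);
        // Mapping extents: only the pages the buffer fully owns.
        constexpr std::uint64_t mapping_start = AlignUp(address, PageSize);
        constexpr std::uint64_t mapping_end = AlignDown(address + size, PageSize);

        static_assert(aligned_start == 0x10000 && aligned_end == 0x13000);
        static_assert(mapping_start == 0x11000 && mapping_end == 0x12000);
        // The pages at 0x10000 and 0x12000 are the partial pages that
        // SetupForIpcServer re-allocates and fills instead of mapping directly.
        return 0;
    }
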
| 4372 | Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size, | ||
| 4373 | KProcessAddress src_addr, KMemoryPermission test_perm, | ||
| 4374 | KMemoryState dst_state, KPageTableBase& src_page_table, | ||
| 4375 | bool send) { | ||
| 4376 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 4377 | ASSERT(src_page_table.IsLockedByCurrentThread()); | ||
| 4378 | |||
| 4379 | // Check that we can theoretically map. | ||
| 4380 | const KProcessAddress region_start = m_alias_region_start; | ||
| 4381 | const size_t region_size = m_alias_region_end - m_alias_region_start; | ||
| 4382 | R_UNLESS(size < region_size, ResultOutOfAddressSpace); | ||
| 4383 | |||
| 4384 | // Get aligned source extents. | ||
| 4385 | const KProcessAddress src_start = src_addr; | ||
| 4386 | const KProcessAddress src_end = src_addr + size; | ||
| 4387 | const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize); | ||
| 4388 | const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize); | ||
| 4389 | const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize); | ||
| 4390 | const KProcessAddress mapping_src_end = | ||
| 4391 | Common::AlignDown(GetInteger(src_start) + size, PageSize); | ||
| 4392 | const size_t aligned_src_size = aligned_src_end - aligned_src_start; | ||
| 4393 | const size_t mapping_src_size = | ||
| 4394 | (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; | ||
| 4395 | |||
| 4396 | // Select a random address to map at. | ||
| 4397 | KProcessAddress dst_addr = 0; | ||
| 4398 | { | ||
| 4399 | const size_t alignment = 4_KiB; | ||
| 4400 | const size_t offset = GetInteger(aligned_src_start) & (alignment - 1); | ||
| 4401 | |||
| 4402 | dst_addr = | ||
| 4403 | this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, | ||
| 4404 | alignment, offset, this->GetNumGuardPages()); | ||
| 4405 | R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); | ||
| 4406 | } | ||
| 4407 | |||
| 4408 |     // Check that the destination can contain the mapping we're about to create. | ||
| 4409 | ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); | ||
| 4410 | |||
| 4411 | // Create an update allocator. | ||
| 4412 | Result allocator_result; | ||
| 4413 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4414 | m_memory_block_slab_manager); | ||
| 4415 | R_TRY(allocator_result); | ||
| 4416 | |||
| 4417 | // We're going to perform an update, so create a helper. | ||
| 4418 | KScopedPageTableUpdater updater(this); | ||
| 4419 | |||
| 4420 | // Reserve space for any partial pages we allocate. | ||
| 4421 | const size_t unmapped_size = aligned_src_size - mapping_src_size; | ||
| 4422 | KScopedResourceReservation memory_reservation( | ||
| 4423 | m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size); | ||
| 4424 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 4425 | |||
| 4426 | // Ensure that we manage page references correctly. | ||
| 4427 | KPhysicalAddress start_partial_page = 0; | ||
| 4428 | KPhysicalAddress end_partial_page = 0; | ||
| 4429 | KProcessAddress cur_mapped_addr = dst_addr; | ||
| 4430 | |||
| 4431 | // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll | ||
| 4432 |     // be freed on scope exit. | ||
| 4433 | SCOPE_EXIT({ | ||
| 4434 | if (start_partial_page != 0) { | ||
| 4435 | m_kernel.MemoryManager().Close(start_partial_page, 1); | ||
| 4436 | } | ||
| 4437 | if (end_partial_page != 0) { | ||
| 4438 | m_kernel.MemoryManager().Close(end_partial_page, 1); | ||
| 4439 | } | ||
| 4440 | }); | ||
| 4441 | |||
| 4442 | ON_RESULT_FAILURE { | ||
| 4443 | if (cur_mapped_addr != dst_addr) { | ||
| 4444 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 4445 | DisableMergeAttribute::None}; | ||
| 4446 | R_ASSERT(this->Operate(updater.GetPageList(), dst_addr, | ||
| 4447 | (cur_mapped_addr - dst_addr) / PageSize, 0, false, | ||
| 4448 | unmap_properties, OperationType::Unmap, true)); | ||
| 4449 | } | ||
| 4450 | }; | ||
| 4451 | |||
| 4452 | // Allocate the start page as needed. | ||
| 4453 | if (aligned_src_start < mapping_src_start) { | ||
| 4454 | start_partial_page = | ||
| 4455 | m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 4456 | R_UNLESS(start_partial_page != 0, ResultOutOfMemory); | ||
| 4457 | } | ||
| 4458 | |||
| 4459 | // Allocate the end page as needed. | ||
| 4460 | if (mapping_src_end < aligned_src_end && | ||
| 4461 | (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { | ||
| 4462 | end_partial_page = | ||
| 4463 | m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 4464 | R_UNLESS(end_partial_page != 0, ResultOutOfMemory); | ||
| 4465 | } | ||
| 4466 | |||
| 4467 | // Get the implementation. | ||
| 4468 | auto& src_impl = src_page_table.GetImpl(); | ||
| 4469 | |||
| 4470 | // Get the fill value for partial pages. | ||
| 4471 | const auto fill_val = m_ipc_fill_value; | ||
| 4472 | |||
| 4473 | // Begin traversal. | ||
| 4474 | TraversalContext context; | ||
| 4475 | TraversalEntry next_entry; | ||
| 4476 | bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), | ||
| 4477 | std::addressof(context), aligned_src_start); | ||
| 4478 | ASSERT(traverse_valid); | ||
| 4479 | |||
| 4480 | // Prepare tracking variables. | ||
| 4481 | KPhysicalAddress cur_block_addr = next_entry.phys_addr; | ||
| 4482 | size_t cur_block_size = | ||
| 4483 | next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1)); | ||
| 4484 | size_t tot_block_size = cur_block_size; | ||
| 4485 | |||
| 4486 | // Map the start page, if we have one. | ||
| 4487 | if (start_partial_page != 0) { | ||
| 4488 | // Ensure the page holds correct data. | ||
| 4489 | u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page); | ||
| 4490 | if (send) { | ||
| 4491 | const size_t partial_offset = src_start - aligned_src_start; | ||
| 4492 | size_t copy_size, clear_size; | ||
| 4493 | if (src_end < mapping_src_start) { | ||
| 4494 | copy_size = size; | ||
| 4495 | clear_size = mapping_src_start - src_end; | ||
| 4496 | } else { | ||
| 4497 | copy_size = mapping_src_start - src_start; | ||
| 4498 | clear_size = 0; | ||
| 4499 | } | ||
| 4500 | |||
| 4501 | std::memset(start_partial_virt, fill_val, partial_offset); | ||
| 4502 | std::memcpy(start_partial_virt + partial_offset, | ||
| 4503 | GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset, | ||
| 4504 | copy_size); | ||
| 4505 | if (clear_size > 0) { | ||
| 4506 | std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size); | ||
| 4507 | } | ||
| 4508 | } else { | ||
| 4509 | std::memset(start_partial_virt, fill_val, PageSize); | ||
| 4510 | } | ||
| 4511 | |||
| 4512 | // Map the page. | ||
| 4513 | const KPageProperties start_map_properties = {test_perm, false, false, | ||
| 4514 | DisableMergeAttribute::DisableHead}; | ||
| 4515 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true, | ||
| 4516 | start_map_properties, OperationType::Map, false)); | ||
| 4517 | |||
| 4518 | // Update tracking extents. | ||
| 4519 | cur_mapped_addr += PageSize; | ||
| 4520 | cur_block_addr += PageSize; | ||
| 4521 | cur_block_size -= PageSize; | ||
| 4522 | |||
| 4523 | // If the block's size was one page, we may need to continue traversal. | ||
| 4524 | if (cur_block_size == 0 && aligned_src_size > PageSize) { | ||
| 4525 | traverse_valid = | ||
| 4526 | src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 4527 | ASSERT(traverse_valid); | ||
| 4528 | |||
| 4529 | cur_block_addr = next_entry.phys_addr; | ||
| 4530 | cur_block_size = next_entry.block_size; | ||
| 4531 | tot_block_size += next_entry.block_size; | ||
| 4532 | } | ||
| 4533 | } | ||
| 4534 | |||
| 4535 | // Map the remaining pages. | ||
| 4536 | while (aligned_src_start + tot_block_size < mapping_src_end) { | ||
| 4537 | // Continue the traversal. | ||
| 4538 | traverse_valid = | ||
| 4539 | src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 4540 | ASSERT(traverse_valid); | ||
| 4541 | |||
| 4542 | // Process the block. | ||
| 4543 | if (next_entry.phys_addr != cur_block_addr + cur_block_size) { | ||
| 4544 | // Map the block we've been processing so far. | ||
| 4545 | const KPageProperties map_properties = {test_perm, false, false, | ||
| 4546 | (cur_mapped_addr == dst_addr) | ||
| 4547 | ? DisableMergeAttribute::DisableHead | ||
| 4548 | : DisableMergeAttribute::None}; | ||
| 4549 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize, | ||
| 4550 | cur_block_addr, true, map_properties, OperationType::Map, false)); | ||
| 4551 | |||
| 4552 | // Update tracking extents. | ||
| 4553 | cur_mapped_addr += cur_block_size; | ||
| 4554 | cur_block_addr = next_entry.phys_addr; | ||
| 4555 | cur_block_size = next_entry.block_size; | ||
| 4556 | } else { | ||
| 4557 | cur_block_size += next_entry.block_size; | ||
| 4558 | } | ||
| 4559 | tot_block_size += next_entry.block_size; | ||
| 4560 | } | ||
| 4561 | |||
| 4562 | // Handle the last direct-mapped page. | ||
| 4563 | if (const KProcessAddress mapped_block_end = | ||
| 4564 | aligned_src_start + tot_block_size - cur_block_size; | ||
| 4565 | mapped_block_end < mapping_src_end) { | ||
| 4566 | const size_t last_block_size = mapping_src_end - mapped_block_end; | ||
| 4567 | |||
| 4568 | // Map the last block. | ||
| 4569 | const KPageProperties map_properties = {test_perm, false, false, | ||
| 4570 | (cur_mapped_addr == dst_addr) | ||
| 4571 | ? DisableMergeAttribute::DisableHead | ||
| 4572 | : DisableMergeAttribute::None}; | ||
| 4573 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize, | ||
| 4574 | cur_block_addr, true, map_properties, OperationType::Map, false)); | ||
| 4575 | |||
| 4576 | // Update tracking extents. | ||
| 4577 | cur_mapped_addr += last_block_size; | ||
| 4578 | cur_block_addr += last_block_size; | ||
| 4579 | if (mapped_block_end + cur_block_size < aligned_src_end && | ||
| 4580 | cur_block_size == last_block_size) { | ||
| 4581 | traverse_valid = | ||
| 4582 | src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); | ||
| 4583 | ASSERT(traverse_valid); | ||
| 4584 | |||
| 4585 | cur_block_addr = next_entry.phys_addr; | ||
| 4586 | } | ||
| 4587 | } | ||
| 4588 | |||
| 4589 | // Map the end page, if we have one. | ||
| 4590 | if (end_partial_page != 0) { | ||
| 4591 | // Ensure the page holds correct data. | ||
| 4592 | u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page); | ||
| 4593 | if (send) { | ||
| 4594 | const size_t copy_size = src_end - mapping_src_end; | ||
| 4595 | std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr), | ||
| 4596 | copy_size); | ||
| 4597 | std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size); | ||
| 4598 | } else { | ||
| 4599 | std::memset(end_partial_virt, fill_val, PageSize); | ||
| 4600 | } | ||
| 4601 | |||
| 4602 | // Map the page. | ||
| 4603 | const KPageProperties map_properties = {test_perm, false, false, | ||
| 4604 | (cur_mapped_addr == dst_addr) | ||
| 4605 | ? DisableMergeAttribute::DisableHead | ||
| 4606 | : DisableMergeAttribute::None}; | ||
| 4607 | R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true, | ||
| 4608 | map_properties, OperationType::Map, false)); | ||
| 4609 | } | ||
| 4610 | |||
| 4611 |     // Update memory blocks to reflect our changes. | ||
| 4612 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, | ||
| 4613 | dst_state, test_perm, KMemoryAttribute::None, | ||
| 4614 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 4615 | KMemoryBlockDisableMergeAttribute::None); | ||
| 4616 | |||
| 4617 | // Set the output address. | ||
| 4618 | *out_addr = dst_addr + (src_start - aligned_src_start); | ||
| 4619 | |||
| 4620 | // We succeeded. | ||
| 4621 | memory_reservation.Commit(); | ||
| 4622 | R_SUCCEED(); | ||
| 4623 | } | ||
| 4624 | |||
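For a send mapping, the start partial page is stitched together from up to three regions: fill bytes before the message begins, the client's own bytes, and, when the message ends before the first fully-mapped page, fill bytes after it. A buffer-level sketch with plain vectors standing in for heap pages and an assumed fill value:

    // partial_page_fill_sketch.cpp: models the start-partial-page composition
    // for a send mapping; vectors stand in for heap pages, fill value assumed.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
        constexpr std::size_t PageSize = 0x1000;
        const std::uint8_t fill_val = 0x5A; // stand-in for m_ipc_fill_value

        std::vector<std::uint8_t> client_page(PageSize, 0xAB); // client's data
        std::vector<std::uint8_t> partial(PageSize);

        const std::size_t partial_offset = 0x800; // src_start - aligned_src_start
        const std::size_t copy_size = PageSize - partial_offset; // message reaches page end

        std::memset(partial.data(), fill_val, partial_offset); // leading fill
        std::memcpy(partial.data() + partial_offset,
                    client_page.data() + partial_offset, copy_size); // client bytes
        // If the message ended before mapping_src_start, a trailing memset
        // would scrub the remainder with fill_val as well.
        return 0;
    }
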
| 4625 | Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, | ||
| 4626 | KProcessAddress src_addr, KPageTableBase& src_page_table, | ||
| 4627 | KMemoryPermission test_perm, KMemoryState dst_state, bool send) { | ||
| 4628 | // For convenience, alias this. | ||
| 4629 | KPageTableBase& dst_page_table = *this; | ||
| 4630 | |||
| 4631 | // Acquire the table locks. | ||
| 4632 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 4633 | |||
| 4634 | // We're going to perform an update, so create a helper. | ||
| 4635 | KScopedPageTableUpdater updater(std::addressof(src_page_table)); | ||
| 4636 | |||
| 4637 | // Perform client setup. | ||
| 4638 | size_t num_allocator_blocks; | ||
| 4639 | R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), | ||
| 4640 | std::addressof(num_allocator_blocks), src_addr, size, | ||
| 4641 | test_perm, dst_state)); | ||
| 4642 | |||
| 4643 | // Create an update allocator. | ||
| 4644 | Result allocator_result; | ||
| 4645 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4646 | src_page_table.m_memory_block_slab_manager, | ||
| 4647 | num_allocator_blocks); | ||
| 4648 | R_TRY(allocator_result); | ||
| 4649 | |||
| 4650 | // Get the mapped extents. | ||
| 4651 | const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize); | ||
| 4652 | const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize); | ||
| 4653 | const size_t src_map_size = src_map_end - src_map_start; | ||
| 4654 | |||
| 4655 | // Ensure that we clean up appropriately if we fail after this. | ||
| 4656 | const auto src_perm = static_cast<KMemoryPermission>( | ||
| 4657 | (test_perm == KMemoryPermission::UserReadWrite) | ||
| 4658 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 4659 | : KMemoryPermission::UserRead); | ||
| 4660 | ON_RESULT_FAILURE { | ||
| 4661 | if (src_map_end > src_map_start) { | ||
| 4662 | src_page_table.CleanupForIpcClientOnServerSetupFailure( | ||
| 4663 | updater.GetPageList(), src_map_start, src_map_size, src_perm); | ||
| 4664 | } | ||
| 4665 | }; | ||
| 4666 | |||
| 4667 | // Perform server setup. | ||
| 4668 | R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, | ||
| 4669 | src_page_table, send)); | ||
| 4670 | |||
| 4671 | // If anything was mapped, ipc-lock the pages. | ||
| 4672 | if (src_map_start < src_map_end) { | ||
| 4673 |         // Lock the mapped pages for IPC. | ||
| 4674 | src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, | ||
| 4675 | (src_map_end - src_map_start) / PageSize, | ||
| 4676 | &KMemoryBlock::LockForIpc, src_perm); | ||
| 4677 | } | ||
| 4678 | |||
| 4679 | R_SUCCEED(); | ||
| 4680 | } | ||
| 4681 | |||
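SetupForIpc depends on the ON_RESULT_FAILURE idiom used throughout this file: the rollback of client setup runs only if the scope ends in failure. A minimal destructor-based guard expressing the same shape, with an explicit Cancel() standing in for where the real macro keys off the Result value:

    // scope_guard_sketch.cpp: minimal stand-in for the ON_RESULT_FAILURE
    // idiom; the real macro is more elaborate.
    #include <cstdio>
    #include <utility>

    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : m_f(std::move(f)) {}
        ~ScopeGuard() {
            if (m_active) {
                m_f(); // failure path: run the rollback
            }
        }
        void Cancel() {
            m_active = false; // success path: keep the side effects
        }

    private:
        F m_f;
        bool m_active = true;
    };

    bool SetupServer(bool fail) {
        return !fail;
    }

    bool Setup(bool fail_server) {
        // ...client setup would have happened here...
        ScopeGuard guard{[] { std::puts("rolling back client setup"); }};
        if (!SetupServer(fail_server)) {
            return false; // guard fires and undoes client setup
        }
        guard.Cancel();
        return true;
    }

    int main() {
        Setup(true);  // prints the rollback message
        Setup(false); // silent: setup committed
        return 0;
    }
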
| 4682 | Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, | ||
| 4683 | KMemoryState dst_state) { | ||
| 4684 | // Validate the address. | ||
| 4685 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 4686 | |||
| 4687 | // Lock the table. | ||
| 4688 | KScopedLightLock lk(m_general_lock); | ||
| 4689 | |||
| 4690 | // Validate the memory state. | ||
| 4691 | size_t num_allocator_blocks; | ||
| 4692 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 4693 | KMemoryState::All, dst_state, KMemoryPermission::UserRead, | ||
| 4694 | KMemoryPermission::UserRead, KMemoryAttribute::All, | ||
| 4695 | KMemoryAttribute::None)); | ||
| 4696 | |||
| 4697 | // Create an update allocator. | ||
| 4698 | Result allocator_result; | ||
| 4699 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4700 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 4701 | R_TRY(allocator_result); | ||
| 4702 | |||
| 4703 | // We're going to perform an update, so create a helper. | ||
| 4704 | KScopedPageTableUpdater updater(this); | ||
| 4705 | |||
| 4706 | // Get aligned extents. | ||
| 4707 | const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize); | ||
| 4708 | const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize); | ||
| 4709 | const size_t aligned_size = aligned_end - aligned_start; | ||
| 4710 | const size_t aligned_num_pages = aligned_size / PageSize; | ||
| 4711 | |||
| 4712 | // Unmap the pages. | ||
| 4713 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 4714 | DisableMergeAttribute::None}; | ||
| 4715 | R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false, | ||
| 4716 | unmap_properties, OperationType::Unmap, false)); | ||
| 4717 | |||
| 4718 | // Update memory blocks. | ||
| 4719 | m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, | ||
| 4720 | KMemoryState::None, KMemoryPermission::None, | ||
| 4721 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 4722 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 4723 | |||
| 4724 | // Release from the resource limit as relevant. | ||
| 4725 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 4726 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 4727 | const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; | ||
| 4728 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, | ||
| 4729 | aligned_size - mapping_size); | ||
| 4730 | |||
| 4731 | R_SUCCEED(); | ||
| 4732 | } | ||
| 4733 | |||
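The Release call above returns exactly the partial-page overhead that SetupForIpcServer reserved, namely the aligned size minus the fully-mapped middle. The same arithmetic, checked at compile time with assumed values:

    // ipc_release_sketch.cpp: the release amount in CleanupForIpcServer equals
    // the partial-page overhead reserved at setup time; values are assumed.
    #include <cstdint>

    constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
        return v & ~(a - 1); // 'a' must be a power of two
    }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) {
        return AlignDown(v + a - 1, a);
    }

    int main() {
        constexpr std::uint64_t PageSize = 0x1000;
        constexpr std::uint64_t address = 0x20800;
        constexpr std::uint64_t size = 0x2000;

        constexpr std::uint64_t aligned_size =
            AlignUp(address + size, PageSize) - AlignDown(address, PageSize); // 0x3000
        constexpr std::uint64_t mapping_start = AlignUp(address, PageSize);
        constexpr std::uint64_t mapping_end = AlignDown(address + size, PageSize);
        constexpr std::uint64_t mapping_size =
            (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;  // 0x1000

        static_assert(aligned_size - mapping_size == 2 * PageSize); // two partial pages
        return 0;
    }
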
| 4734 | Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, | ||
| 4735 | KMemoryState dst_state) { | ||
| 4736 | // Validate the address. | ||
| 4737 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 4738 | |||
| 4739 | // Get aligned source extents. | ||
| 4740 | const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize); | ||
| 4741 | const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize); | ||
| 4742 | const KProcessAddress mapping_last = mapping_end - 1; | ||
| 4743 | const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; | ||
| 4744 | |||
| 4745 | // If nothing was mapped, we're actually done immediately. | ||
| 4746 | R_SUCCEED_IF(mapping_size == 0); | ||
| 4747 | |||
| 4748 | // Get the test state and attribute mask. | ||
| 4749 | KMemoryState test_state; | ||
| 4750 | KMemoryAttribute test_attr_mask; | ||
| 4751 | switch (dst_state) { | ||
| 4752 | case KMemoryState::Ipc: | ||
| 4753 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 4754 | test_attr_mask = | ||
| 4755 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 4756 | break; | ||
| 4757 | case KMemoryState::NonSecureIpc: | ||
| 4758 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 4759 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4760 | break; | ||
| 4761 | case KMemoryState::NonDeviceIpc: | ||
| 4762 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 4763 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 4764 | break; | ||
| 4765 | default: | ||
| 4766 | R_THROW(ResultInvalidCombination); | ||
| 4767 | } | ||
| 4768 | |||
| 4769 | // Lock the table. | ||
| 4770 | // NOTE: Nintendo does this *after* creating the updater below, but this does not follow | ||
| 4771 | // convention elsewhere in KPageTableBase. | ||
| 4772 | KScopedLightLock lk(m_general_lock); | ||
| 4773 | |||
| 4774 | // We're going to perform an update, so create a helper. | ||
| 4775 | KScopedPageTableUpdater updater(this); | ||
| 4776 | |||
| 4777 | // Ensure that on failure, we roll back appropriately. | ||
| 4778 | size_t mapped_size = 0; | ||
| 4779 | ON_RESULT_FAILURE { | ||
| 4780 | if (mapped_size > 0) { | ||
| 4781 | // Determine where the mapping ends. | ||
| 4782 | const auto mapped_end = GetInteger(mapping_start) + mapped_size; | ||
| 4783 | const auto mapped_last = mapped_end - 1; | ||
| 4784 | |||
| 4785 | // Get current and next iterators. | ||
| 4786 | KMemoryBlockManager::const_iterator start_it = | ||
| 4787 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 4788 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 4789 | ++next_it; | ||
| 4790 | |||
| 4791 | // Get the current block info. | ||
| 4792 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 4793 | |||
| 4794 | // Create tracking variables. | ||
| 4795 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 4796 | size_t cur_size = cur_info.GetSize(); | ||
| 4797 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 4798 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 4799 | bool first = cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 4800 | False(cur_info.GetDisableMergeAttribute() & | ||
| 4801 | KMemoryBlockDisableMergeAttribute::Locked); | ||
| 4802 | |||
| 4803 | while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) { | ||
| 4804 | // Check that we have a next block. | ||
| 4805 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 4806 | |||
| 4807 | // Get the next info. | ||
| 4808 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 4809 | |||
| 4810 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 4811 | const bool next_perm_eq = | ||
| 4812 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 4813 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 4814 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 4815 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 4816 | // We can consolidate the reprotection for the current and next block into a | ||
| 4817 | // single call. | ||
| 4818 | cur_size += next_info.GetSize(); | ||
| 4819 | } else { | ||
| 4820 | // We have to operate on the current block. | ||
| 4821 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 4822 | const KPageProperties properties = { | ||
| 4823 | cur_info.GetPermission(), false, false, | ||
| 4824 | first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail | ||
| 4825 | : DisableMergeAttribute::None}; | ||
| 4826 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, | ||
| 4827 | cur_size / PageSize, 0, false, properties, | ||
| 4828 | OperationType::ChangePermissions, true)); | ||
| 4829 | } | ||
| 4830 | |||
| 4831 | // Advance. | ||
| 4832 | cur_address = next_info.GetAddress(); | ||
| 4833 | cur_size = next_info.GetSize(); | ||
| 4834 | first = false; | ||
| 4835 | } | ||
| 4836 | |||
| 4837 | // Advance. | ||
| 4838 | cur_info = next_info; | ||
| 4839 | cur_perm_eq = next_perm_eq; | ||
| 4840 | cur_needs_set_perm = next_needs_set_perm; | ||
| 4841 | ++next_it; | ||
| 4842 | } | ||
| 4843 | |||
| 4844 | // Process the last block. | ||
| 4845 | if ((first || cur_needs_set_perm) && !cur_perm_eq) { | ||
| 4846 | const KPageProperties properties = { | ||
| 4847 | cur_info.GetPermission(), false, false, | ||
| 4848 | first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail | ||
| 4849 | : DisableMergeAttribute::None}; | ||
| 4850 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, | ||
| 4851 | false, properties, OperationType::ChangePermissions, true)); | ||
| 4852 | } | ||
| 4853 | } | ||
| 4854 | }; | ||
| 4855 | |||
| 4856 | // Iterate, reprotecting as needed. | ||
| 4857 | { | ||
| 4858 | // Get current and next iterators. | ||
| 4859 | KMemoryBlockManager::const_iterator start_it = | ||
| 4860 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 4861 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 4862 | ++next_it; | ||
| 4863 | |||
| 4864 | // Validate the current block. | ||
| 4865 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 4866 | R_ASSERT(this->CheckMemoryState( | ||
| 4867 | cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None, | ||
| 4868 | test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked)); | ||
| 4869 | |||
| 4870 | // Create tracking variables. | ||
| 4871 | KProcessAddress cur_address = cur_info.GetAddress(); | ||
| 4872 | size_t cur_size = cur_info.GetSize(); | ||
| 4873 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 4874 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 4875 | bool first = | ||
| 4876 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 4877 | False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked); | ||
| 4878 | |||
| 4879 | while ((cur_address + cur_size - 1) < mapping_last) { | ||
| 4880 | // Check that we have a next block. | ||
| 4881 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 4882 | |||
| 4883 | // Get the next info. | ||
| 4884 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 4885 | |||
| 4886 | // Validate the next block. | ||
| 4887 | R_ASSERT(this->CheckMemoryState( | ||
| 4888 | next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None, | ||
| 4889 | test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked)); | ||
| 4890 | |||
| 4891 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 4892 | const bool next_perm_eq = | ||
| 4893 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 4894 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 4895 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 4896 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 4897 | // We can consolidate the reprotection for the current and next block into a single | ||
| 4898 | // call. | ||
| 4899 | cur_size += next_info.GetSize(); | ||
| 4900 | } else { | ||
| 4901 | // We have to operate on the current block. | ||
| 4902 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 4903 | const KPageProperties properties = { | ||
| 4904 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 4905 | : cur_info.GetPermission(), | ||
| 4906 | false, false, | ||
| 4907 | first ? DisableMergeAttribute::EnableHeadAndBody | ||
| 4908 | : DisableMergeAttribute::None}; | ||
| 4909 | R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, | ||
| 4910 | false, properties, OperationType::ChangePermissions, | ||
| 4911 | false)); | ||
| 4912 | } | ||
| 4913 | |||
| 4914 | // Mark that we mapped the block. | ||
| 4915 | mapped_size += cur_size; | ||
| 4916 | |||
| 4917 | // Advance. | ||
| 4918 | cur_address = next_info.GetAddress(); | ||
| 4919 | cur_size = next_info.GetSize(); | ||
| 4920 | first = false; | ||
| 4921 | } | ||
| 4922 | |||
| 4923 | // Advance. | ||
| 4924 | cur_info = next_info; | ||
| 4925 | cur_perm_eq = next_perm_eq; | ||
| 4926 | cur_needs_set_perm = next_needs_set_perm; | ||
| 4927 | ++next_it; | ||
| 4928 | } | ||
| 4929 | |||
| 4930 | // Process the last block. | ||
| 4931 | const auto lock_count = | ||
| 4932 | cur_info.GetIpcLockCount() + | ||
| 4933 | (next_it != m_memory_block_manager.end() | ||
| 4934 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 4935 | : 0); | ||
| 4936 | if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { | ||
| 4937 | const DisableMergeAttribute head_body_attr = | ||
| 4938 | first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None; | ||
| 4939 | const DisableMergeAttribute tail_attr = | ||
| 4940 | lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None; | ||
| 4941 | const KPageProperties properties = { | ||
| 4942 | cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), | ||
| 4943 | false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)}; | ||
| 4944 | R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false, | ||
| 4945 | properties, OperationType::ChangePermissions, false)); | ||
| 4946 | } | ||
| 4947 | } | ||
| 4948 | |||
| 4949 | // Create an update allocator. | ||
| 4950 | // NOTE: Guaranteed zero blocks needed here. | ||
| 4951 | Result allocator_result; | ||
| 4952 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 4953 | m_memory_block_slab_manager, 0); | ||
| 4954 | R_TRY(allocator_result); | ||
| 4955 | |||
| 4956 | // Unlock the pages. | ||
| 4957 | m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, | ||
| 4958 | mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, | ||
| 4959 | KMemoryPermission::None); | ||
| 4960 | |||
| 4961 | R_SUCCEED(); | ||
| 4962 | } | ||
| 4963 | |||
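Both the unlock loop and its rollback mirror batch adjacent blocks whose reprotection decision matches, so ChangePermissions runs once per maximal run rather than once per block. A reduced model of that consolidation, with a single boolean standing in for the permission comparisons:

    // reprotect_batch_sketch.cpp: adjacent blocks with a matching decision are
    // widened into one run before reprotecting; the boolean is a stand-in for
    // the permission/original-permission comparisons in the loops above.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct BlockInfo {
        std::size_t address;
        std::size_t size;
        bool needs_set_perm;
    };

    int main() {
        const std::vector<BlockInfo> blocks{
            {0x0000, 0x1000, true}, {0x1000, 0x2000, true}, {0x3000, 0x1000, false}};

        std::size_t cur_address = blocks[0].address;
        std::size_t cur_size = blocks[0].size;
        bool cur_needs = blocks[0].needs_set_perm;
        for (std::size_t i = 1; i < blocks.size(); ++i) {
            if (blocks[i].needs_set_perm == cur_needs) {
                cur_size += blocks[i].size; // same decision: widen the batch
                continue;
            }
            if (cur_needs) { // flush: one ChangePermissions-style call per run
                std::printf("reprotect [%#zx, %#zx)\n", cur_address, cur_address + cur_size);
            }
            cur_address = blocks[i].address;
            cur_size = blocks[i].size;
            cur_needs = blocks[i].needs_set_perm;
        }
        if (cur_needs) { // the last run
            std::printf("reprotect [%#zx, %#zx)\n", cur_address, cur_address + cur_size);
        }
        return 0;
    }
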
| 4964 | void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, | ||
| 4965 | KProcessAddress address, size_t size, | ||
| 4966 | KMemoryPermission prot_perm) { | ||
| 4967 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 4968 | ASSERT(Common::IsAligned(GetInteger(address), PageSize)); | ||
| 4969 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 4970 | |||
| 4971 | // Get the mapped extents. | ||
| 4972 | const KProcessAddress src_map_start = address; | ||
| 4973 | const KProcessAddress src_map_end = address + size; | ||
| 4974 | const KProcessAddress src_map_last = src_map_end - 1; | ||
| 4975 | |||
| 4976 | // This function is only invoked when there's something to do. | ||
| 4977 | ASSERT(src_map_end > src_map_start); | ||
| 4978 | |||
| 4979 | // Iterate over blocks, fixing permissions. | ||
| 4980 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); | ||
| 4981 | while (true) { | ||
| 4982 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 4983 | |||
| 4984 | const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) | ||
| 4985 | ? info.GetAddress() | ||
| 4986 | : GetInteger(src_map_start); | ||
| 4987 | const auto cur_end = | ||
| 4988 | src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); | ||
| 4989 | |||
| 4990 | // If we can, fix the protections on the block. | ||
| 4991 | if ((info.GetIpcLockCount() == 0 && | ||
| 4992 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || | ||
| 4993 | (info.GetIpcLockCount() != 0 && | ||
| 4994 | (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { | ||
| 4995 | // Check if we actually need to fix the protections on the block. | ||
| 4996 | if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || | ||
| 4997 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { | ||
| 4998 | const bool start_nc = (info.GetAddress() == GetInteger(src_map_start)) | ||
| 4999 | ? (False(info.GetDisableMergeAttribute() & | ||
| 5000 | (KMemoryBlockDisableMergeAttribute::Locked | | ||
| 5001 | KMemoryBlockDisableMergeAttribute::IpcLeft))) | ||
| 5002 | : info.GetAddress() <= GetInteger(src_map_start); | ||
| 5003 | |||
| 5004 | const DisableMergeAttribute head_body_attr = | ||
| 5005 | start_nc ? DisableMergeAttribute::EnableHeadAndBody | ||
| 5006 | : DisableMergeAttribute::None; | ||
| 5007 | DisableMergeAttribute tail_attr; | ||
| 5008 | if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) { | ||
| 5009 | auto next_it = it; | ||
| 5010 | ++next_it; | ||
| 5011 | |||
| 5012 | const auto lock_count = | ||
| 5013 | info.GetIpcLockCount() + | ||
| 5014 | (next_it != m_memory_block_manager.end() | ||
| 5015 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 5016 | : 0); | ||
| 5017 | tail_attr = lock_count == 0 ? DisableMergeAttribute::EnableTail | ||
| 5018 | : DisableMergeAttribute::None; | ||
| 5019 | } else { | ||
| 5020 | tail_attr = DisableMergeAttribute::None; | ||
| 5021 | } | ||
| 5022 | |||
| 5023 | const KPageProperties properties = { | ||
| 5024 | info.GetPermission(), false, false, | ||
| 5025 | static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)}; | ||
| 5026 | R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0, | ||
| 5027 | false, properties, OperationType::ChangePermissions, true)); | ||
| 5028 | } | ||
| 5029 | } | ||
| 5030 | |||
| 5031 | // If we're past the end of the region, we're done. | ||
| 5032 | if (src_map_last <= info.GetLastAddress()) { | ||
| 5033 | break; | ||
| 5034 | } | ||
| 5035 | |||
| 5036 | // Advance. | ||
| 5037 | ++it; | ||
| 5038 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5039 | } | ||
| 5040 | } | ||
| 5041 | |||
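The loop above repeatedly clamps each block to the mapped extents before fixing its permissions (the cur_start / cur_end computation). That clamp is a plain interval intersection; a minimal constexpr sketch of the same arithmetic, with illustrative names:

#include <algorithm>
#include <cstdint>

// Intersection of a block with the mapped range, as the cleanup loop above
// computes inline via cur_start/cur_end. Names here are illustrative.
struct Interval {
    std::uint64_t start;
    std::uint64_t end; // exclusive
};

constexpr Interval Clamp(Interval block, Interval range) {
    return {std::max(block.start, range.start), std::min(block.end, range.end)};
}

static_assert(Clamp({0x1000, 0x5000}, {0x2000, 0x9000}).start == 0x2000);
static_assert(Clamp({0x1000, 0x5000}, {0x2000, 0x9000}).end == 0x5000);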
| 5042 | Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 5043 | // Lock the physical memory lock. | ||
| 5044 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 5045 | |||
| 5046 | // Calculate the last address for convenience. | ||
| 5047 | const KProcessAddress last_address = address + size - 1; | ||
| 5048 | |||
| 5049 | // Define iteration variables. | ||
| 5050 | KProcessAddress cur_address; | ||
| 5051 | size_t mapped_size; | ||
| 5052 | |||
| 5053 | // The entire mapping process can be retried. | ||
| 5054 | while (true) { | ||
| 5055 | // Check if the memory is already mapped. | ||
| 5056 | { | ||
| 5057 | // Lock the table. | ||
| 5058 | KScopedLightLock lk(m_general_lock); | ||
| 5059 | |||
| 5060 | // Iterate over the memory. | ||
| 5061 | cur_address = address; | ||
| 5062 | mapped_size = 0; | ||
| 5063 | |||
| 5064 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5065 | while (true) { | ||
| 5066 | // Check that the iterator is valid. | ||
| 5067 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5068 | |||
| 5069 | // Get the memory info. | ||
| 5070 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5071 | |||
| 5072 | // Check if we're done. | ||
| 5073 | if (last_address <= info.GetLastAddress()) { | ||
| 5074 | if (info.GetState() != KMemoryState::Free) { | ||
| 5075 | mapped_size += (last_address + 1 - cur_address); | ||
| 5076 | } | ||
| 5077 | break; | ||
| 5078 | } | ||
| 5079 | |||
| 5080 | // Track the memory if it's mapped. | ||
| 5081 | if (info.GetState() != KMemoryState::Free) { | ||
| 5082 | mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 5083 | } | ||
| 5084 | |||
| 5085 | // Advance. | ||
| 5086 | cur_address = info.GetEndAddress(); | ||
| 5087 | ++it; | ||
| 5088 | } | ||
| 5089 | |||
| 5090 | // If the size mapped is the size requested, we've nothing to do. | ||
| 5091 | R_SUCCEED_IF(size == mapped_size); | ||
| 5092 | } | ||
| 5093 | |||
| 5094 | // Allocate and map the memory. | ||
| 5095 | { | ||
| 5096 | // Reserve the memory from the process resource limit. | ||
| 5097 | KScopedResourceReservation memory_reservation( | ||
| 5098 | m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size); | ||
| 5099 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 5100 | |||
| 5101 | // Allocate pages for the new memory. | ||
| 5102 | KPageGroup pg(m_kernel, m_block_info_manager); | ||
| 5103 | R_TRY(m_kernel.MemoryManager().AllocateForProcess( | ||
| 5104 | std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option, | ||
| 5105 | GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value)); | ||
| 5106 | |||
| 5107 | // If we fail in the next bit (or retry), we need to clean up the pages. | ||
| 5108 | auto pg_guard = SCOPE_GUARD({ | ||
| 5109 | pg.OpenFirst(); | ||
| 5110 | pg.Close(); | ||
| 5111 | }); | ||
| 5112 | |||
| 5113 | // Map the memory. | ||
| 5114 | { | ||
| 5115 | // Lock the table. | ||
| 5116 | KScopedLightLock lk(m_general_lock); | ||
| 5117 | |||
| 5118 | size_t num_allocator_blocks = 0; | ||
| 5119 | |||
| 5120 | // Verify that nobody has mapped memory since we first checked. | ||
| 5121 | { | ||
| 5122 | // Iterate over the memory. | ||
| 5123 | size_t checked_mapped_size = 0; | ||
| 5124 | cur_address = address; | ||
| 5125 | |||
| 5126 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5127 | while (true) { | ||
| 5128 | // Check that the iterator is valid. | ||
| 5129 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5130 | |||
| 5131 | // Get the memory info. | ||
| 5132 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5133 | |||
| 5134 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 5135 | if (is_free) { | ||
| 5136 | if (info.GetAddress() < GetInteger(address)) { | ||
| 5137 | ++num_allocator_blocks; | ||
| 5138 | } | ||
| 5139 | if (last_address < info.GetLastAddress()) { | ||
| 5140 | ++num_allocator_blocks; | ||
| 5141 | } | ||
| 5142 | } | ||
| 5143 | |||
| 5144 | // Check if we're done. | ||
| 5145 | if (last_address <= info.GetLastAddress()) { | ||
| 5146 | if (!is_free) { | ||
| 5147 | checked_mapped_size += (last_address + 1 - cur_address); | ||
| 5148 | } | ||
| 5149 | break; | ||
| 5150 | } | ||
| 5151 | |||
| 5152 | // Track the memory if it's mapped. | ||
| 5153 | if (!is_free) { | ||
| 5154 | checked_mapped_size += | ||
| 5155 | KProcessAddress(info.GetEndAddress()) - cur_address; | ||
| 5156 | } | ||
| 5157 | |||
| 5158 | // Advance. | ||
| 5159 | cur_address = info.GetEndAddress(); | ||
| 5160 | ++it; | ||
| 5161 | } | ||
| 5162 | |||
| 5163 | // If the mapped size changed since the first check, somebody mapped or unmapped | ||
| 5164 | // memory concurrently. If so, retry. | ||
| 5165 | if (mapped_size != checked_mapped_size) { | ||
| 5166 | continue; | ||
| 5167 | } | ||
| 5168 | } | ||
| 5169 | |||
| 5170 | // Create an update allocator. | ||
| 5171 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 5172 | Result allocator_result; | ||
| 5173 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 5174 | m_memory_block_slab_manager, | ||
| 5175 | num_allocator_blocks); | ||
| 5176 | R_TRY(allocator_result); | ||
| 5177 | |||
| 5178 | // We're going to perform an update, so create a helper. | ||
| 5179 | KScopedPageTableUpdater updater(this); | ||
| 5180 | |||
| 5181 | // Prepare to iterate over the memory. | ||
| 5182 | auto pg_it = pg.begin(); | ||
| 5183 | KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); | ||
| 5184 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 5185 | |||
| 5186 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 5187 | pg_guard.Cancel(); | ||
| 5188 | cur_address = address; | ||
| 5189 | ON_RESULT_FAILURE { | ||
| 5190 | if (cur_address > address) { | ||
| 5191 | const KProcessAddress last_unmap_address = cur_address - 1; | ||
| 5192 | |||
| 5193 | // Iterate, unmapping the pages. | ||
| 5194 | cur_address = address; | ||
| 5195 | |||
| 5196 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5197 | while (true) { | ||
| 5198 | // Check that the iterator is valid. | ||
| 5199 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5200 | |||
| 5201 | // Get the memory info. | ||
| 5202 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5203 | |||
| 5204 | // If the memory state is free, we mapped it and need to unmap it. | ||
| 5205 | if (info.GetState() == KMemoryState::Free) { | ||
| 5206 | // Determine the range to unmap. | ||
| 5207 | const KPageProperties unmap_properties = { | ||
| 5208 | KMemoryPermission::None, false, false, | ||
| 5209 | DisableMergeAttribute::None}; | ||
| 5210 | const size_t cur_pages = | ||
| 5211 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 5212 | last_unmap_address + 1 - cur_address) / | ||
| 5213 | PageSize; | ||
| 5214 | |||
| 5215 | // Unmap. | ||
| 5216 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, | ||
| 5217 | cur_pages, 0, false, unmap_properties, | ||
| 5218 | OperationType::Unmap, true)); | ||
| 5219 | } | ||
| 5220 | |||
| 5221 | // Check if we're done. | ||
| 5222 | if (last_unmap_address <= info.GetLastAddress()) { | ||
| 5223 | break; | ||
| 5224 | } | ||
| 5225 | |||
| 5226 | // Advance. | ||
| 5227 | cur_address = info.GetEndAddress(); | ||
| 5228 | ++it; | ||
| 5229 | } | ||
| 5230 | } | ||
| 5231 | |||
| 5232 | // Release any remaining unmapped memory. | ||
| 5233 | m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages); | ||
| 5234 | m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages); | ||
| 5235 | for (++pg_it; pg_it != pg.end(); ++pg_it) { | ||
| 5236 | m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 5237 | pg_it->GetNumPages()); | ||
| 5238 | m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages()); | ||
| 5239 | } | ||
| 5240 | }; | ||
| 5241 | |||
| 5242 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5243 | while (true) { | ||
| 5244 | // Check that the iterator is valid. | ||
| 5245 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5246 | |||
| 5247 | // Get the memory info. | ||
| 5248 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5249 | |||
| 5250 | // If it's unmapped, we need to map it. | ||
| 5251 | if (info.GetState() == KMemoryState::Free) { | ||
| 5252 | // Determine the range to map. | ||
| 5253 | const KPageProperties map_properties = { | ||
| 5254 | KMemoryPermission::UserReadWrite, false, false, | ||
| 5255 | cur_address == this->GetAliasRegionStart() | ||
| 5256 | ? DisableMergeAttribute::DisableHead | ||
| 5257 | : DisableMergeAttribute::None}; | ||
| 5258 | size_t map_pages = | ||
| 5259 | std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 5260 | last_address + 1 - cur_address) / | ||
| 5261 | PageSize; | ||
| 5262 | |||
| 5263 | // While we have pages to map, map them. | ||
| 5264 | { | ||
| 5265 | // Create a page group for the current mapping range. | ||
| 5266 | KPageGroup cur_pg(m_kernel, m_block_info_manager); | ||
| 5267 | { | ||
| 5268 | ON_RESULT_FAILURE_2 { | ||
| 5269 | cur_pg.OpenFirst(); | ||
| 5270 | cur_pg.Close(); | ||
| 5271 | }; | ||
| 5272 | |||
| 5273 | size_t remain_pages = map_pages; | ||
| 5274 | while (remain_pages > 0) { | ||
| 5275 | // Check if we're at the end of the physical block. | ||
| 5276 | if (pg_pages == 0) { | ||
| 5277 | // Ensure there are more pages to map. | ||
| 5278 | ASSERT(pg_it != pg.end()); | ||
| 5279 | |||
| 5280 | // Advance our physical block. | ||
| 5281 | ++pg_it; | ||
| 5282 | pg_phys_addr = pg_it->GetAddress(); | ||
| 5283 | pg_pages = pg_it->GetNumPages(); | ||
| 5284 | } | ||
| 5285 | |||
| 5286 | // Add whatever we can to the current block. | ||
| 5287 | const size_t cur_pages = std::min(pg_pages, remain_pages); | ||
| 5288 | R_TRY(cur_pg.AddBlock(pg_phys_addr + | ||
| 5289 | ((pg_pages - cur_pages) * PageSize), | ||
| 5290 | cur_pages)); | ||
| 5291 | |||
| 5292 | // Advance. | ||
| 5293 | remain_pages -= cur_pages; | ||
| 5294 | pg_pages -= cur_pages; | ||
| 5295 | } | ||
| 5296 | } | ||
| 5297 | |||
| 5298 | // Map the pages. | ||
| 5299 | R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, | ||
| 5300 | cur_pg, map_properties, | ||
| 5301 | OperationType::MapFirstGroup, false)); | ||
| 5302 | } | ||
| 5303 | } | ||
| 5304 | |||
| 5305 | // Check if we're done. | ||
| 5306 | if (last_address <= info.GetLastAddress()) { | ||
| 5307 | break; | ||
| 5308 | } | ||
| 5309 | |||
| 5310 | // Advance. | ||
| 5311 | cur_address = info.GetEndAddress(); | ||
| 5312 | ++it; | ||
| 5313 | } | ||
| 5314 | |||
| 5315 | // We succeeded, so commit the memory reservation. | ||
| 5316 | memory_reservation.Commit(); | ||
| 5317 | |||
| 5318 | // Increase our tracked mapped size. | ||
| 5319 | m_mapped_physical_memory_size += (size - mapped_size); | ||
| 5320 | |||
| 5321 | // Update the relevant memory blocks. | ||
| 5322 | m_memory_block_manager.UpdateIfMatch( | ||
| 5323 | std::addressof(allocator), address, size / PageSize, KMemoryState::Free, | ||
| 5324 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | ||
| 5325 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None, | ||
| 5326 | address == this->GetAliasRegionStart() | ||
| 5327 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 5328 | : KMemoryBlockDisableMergeAttribute::None, | ||
| 5329 | KMemoryBlockDisableMergeAttribute::None); | ||
| 5330 | |||
| 5331 | R_SUCCEED(); | ||
| 5332 | } | ||
| 5333 | } | ||
| 5334 | } | ||
| 5335 | } | ||
| 5336 | |||
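MapPhysicalMemory follows a check / allocate / re-check pattern: the table lock is dropped while the backing pages are allocated, so the mapped size is sampled again under the lock and the whole operation retries if a concurrent map or unmap changed it. A minimal sketch of that retry shape; Snapshot() and TryCommit() are hypothetical stand-ins for the block-manager walks:

#include <cstddef>
#include <mutex>

struct OptimisticMapper {
    std::mutex table_lock;
    std::size_t mapped_size = 0; // protected by table_lock

    std::size_t Snapshot() {
        std::scoped_lock lk(table_lock);
        return mapped_size;
    }

    bool TryCommit(std::size_t expected, std::size_t delta) {
        std::scoped_lock lk(table_lock);
        if (mapped_size != expected) {
            return false; // concurrent map/unmap observed; caller must retry
        }
        mapped_size += delta;
        return true;
    }

    void Map(std::size_t delta) {
        while (true) {
            const std::size_t observed = Snapshot();
            // ...allocate backing pages here, outside the table lock...
            if (TryCommit(observed, delta)) {
                return; // success: reservation committed
            }
            // ...free the allocation, then retry from the top...
        }
    }
};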
| 5337 | Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 5338 | // Lock the physical memory lock. | ||
| 5339 | KScopedLightLock phys_lk(m_map_physical_memory_lock); | ||
| 5340 | |||
| 5341 | // Lock the table. | ||
| 5342 | KScopedLightLock lk(m_general_lock); | ||
| 5343 | |||
| 5344 | // Calculate the last address for convenience. | ||
| 5345 | const KProcessAddress last_address = address + size - 1; | ||
| 5346 | |||
| 5347 | // Define iteration variables. | ||
| 5348 | KProcessAddress map_start_address = 0; | ||
| 5349 | KProcessAddress map_last_address = 0; | ||
| 5350 | |||
| 5351 | KProcessAddress cur_address; | ||
| 5352 | size_t mapped_size; | ||
| 5353 | size_t num_allocator_blocks = 0; | ||
| 5354 | |||
| 5355 | // Check if the memory is mapped. | ||
| 5356 | { | ||
| 5357 | // Iterate over the memory. | ||
| 5358 | cur_address = address; | ||
| 5359 | mapped_size = 0; | ||
| 5360 | |||
| 5361 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5362 | while (true) { | ||
| 5363 | // Check that the iterator is valid. | ||
| 5364 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5365 | |||
| 5366 | // Get the memory info. | ||
| 5367 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5368 | |||
| 5369 | // Verify the memory's state. | ||
| 5370 | const bool is_normal = info.GetState() == KMemoryState::Normal && | ||
| 5371 | info.GetAttribute() == KMemoryAttribute::None; | ||
| 5372 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 5373 | R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory); | ||
| 5374 | |||
| 5375 | if (is_normal) { | ||
| 5376 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | ||
| 5377 | |||
| 5378 | if (map_start_address == 0) { | ||
| 5379 | map_start_address = cur_address; | ||
| 5380 | } | ||
| 5381 | map_last_address = | ||
| 5382 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 5383 | |||
| 5384 | if (info.GetAddress() < GetInteger(address)) { | ||
| 5385 | ++num_allocator_blocks; | ||
| 5386 | } | ||
| 5387 | if (last_address < info.GetLastAddress()) { | ||
| 5388 | ++num_allocator_blocks; | ||
| 5389 | } | ||
| 5390 | |||
| 5391 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 5392 | } | ||
| 5393 | |||
| 5394 | // Check if we're done. | ||
| 5395 | if (last_address <= info.GetLastAddress()) { | ||
| 5396 | break; | ||
| 5397 | } | ||
| 5398 | |||
| 5399 | // Advance. | ||
| 5400 | cur_address = info.GetEndAddress(); | ||
| 5401 | ++it; | ||
| 5402 | } | ||
| 5403 | |||
| 5404 | // If there's nothing mapped, we've nothing to do. | ||
| 5405 | R_SUCCEED_IF(mapped_size == 0); | ||
| 5406 | } | ||
| 5407 | |||
| 5408 | // Create an update allocator. | ||
| 5409 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 5410 | Result allocator_result; | ||
| 5411 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 5412 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 5413 | R_TRY(allocator_result); | ||
| 5414 | |||
| 5415 | // We're going to perform an update, so create a helper. | ||
| 5416 | KScopedPageTableUpdater updater(this); | ||
| 5417 | |||
| 5418 | // Separate the mapping. | ||
| 5419 | const KPageProperties sep_properties = {KMemoryPermission::None, false, false, | ||
| 5420 | DisableMergeAttribute::None}; | ||
| 5421 | R_TRY(this->Operate(updater.GetPageList(), map_start_address, | ||
| 5422 | (map_last_address + 1 - map_start_address) / PageSize, 0, false, | ||
| 5423 | sep_properties, OperationType::Separate, false)); | ||
| 5424 | |||
| 5425 | // Reset the current tracking address. | ||
| 5426 | cur_address = address; | ||
| 5427 | |||
| 5428 | // Iterate over the memory, unmapping as we go. | ||
| 5429 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 5430 | |||
| 5431 | const auto clear_merge_attr = | ||
| 5432 | (it->GetState() == KMemoryState::Normal && | ||
| 5433 | it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) | ||
| 5434 | ? KMemoryBlockDisableMergeAttribute::Normal | ||
| 5435 | : KMemoryBlockDisableMergeAttribute::None; | ||
| 5436 | |||
| 5437 | while (true) { | ||
| 5438 | // Check that the iterator is valid. | ||
| 5439 | ASSERT(it != m_memory_block_manager.end()); | ||
| 5440 | |||
| 5441 | // Get the memory info. | ||
| 5442 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 5443 | |||
| 5444 | // If the memory state is normal, we need to unmap it. | ||
| 5445 | if (info.GetState() == KMemoryState::Normal) { | ||
| 5446 | // Determine the range to unmap. | ||
| 5447 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 5448 | DisableMergeAttribute::None}; | ||
| 5449 | const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, | ||
| 5450 | last_address + 1 - cur_address) / | ||
| 5451 | PageSize; | ||
| 5452 | |||
| 5453 | // Unmap. | ||
| 5454 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false, | ||
| 5455 | unmap_properties, OperationType::Unmap, false)); | ||
| 5456 | } | ||
| 5457 | |||
| 5458 | // Check if we're done. | ||
| 5459 | if (last_address <= info.GetLastAddress()) { | ||
| 5460 | break; | ||
| 5461 | } | ||
| 5462 | |||
| 5463 | // Advance. | ||
| 5464 | cur_address = info.GetEndAddress(); | ||
| 5465 | ++it; | ||
| 5466 | } | ||
| 5467 | |||
| 5468 | // Release the memory resource. | ||
| 5469 | m_mapped_physical_memory_size -= mapped_size; | ||
| 5470 | m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size); | ||
| 5471 | |||
| 5472 | // Update memory blocks. | ||
| 5473 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | ||
| 5474 | KMemoryState::Free, KMemoryPermission::None, | ||
| 5475 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 5476 | clear_merge_attr); | ||
| 5477 | |||
| 5478 | // We succeeded. | ||
| 5479 | R_SUCCEED(); | ||
| 5480 | } | ||
| 5481 | |||
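Both the map and unmap paths walk blocks with an inclusive last address (address + size - 1) and terminate on last_address <= info.GetLastAddress(). The inclusive form avoids the overflow an exclusive end would hit for a region ending at the very top of the address space; a small demonstration:

#include <cstdint>
#include <cstdio>

int main() {
    // A 32-bit region ending exactly at the top of the address space.
    const std::uint32_t address = 0xFFFF0000u;
    const std::uint32_t size = 0x00010000u;

    const std::uint32_t end = address + size;      // wraps around to 0
    const std::uint32_t last = address + size - 1; // 0xFFFFFFFF, still comparable

    std::printf("exclusive end = %08x, inclusive last = %08x\n", end, last);
}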
| 5482 | Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 5483 | UNIMPLEMENTED(); | ||
| 5484 | R_THROW(ResultNotImplemented); | ||
| 5485 | } | ||
| 5486 | |||
| 5487 | Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 5488 | UNIMPLEMENTED(); | ||
| 5489 | R_THROW(ResultNotImplemented); | ||
| 5490 | } | ||
| 5491 | |||
| 5492 | Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, | ||
| 5493 | KPageTableBase& src_page_table, | ||
| 5494 | KProcessAddress src_address) { | ||
| 5495 | // We need to lock both this table and the current process's table, so set up an alias. | ||
| 5496 | KPageTableBase& dst_page_table = *this; | ||
| 5497 | |||
| 5498 | // Acquire the table locks. | ||
| 5499 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 5500 | |||
| 5501 | // Check that the memory is mapped in the destination process. | ||
| 5502 | size_t num_allocator_blocks; | ||
| 5503 | R_TRY(dst_page_table.CheckMemoryState( | ||
| 5504 | std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All, | ||
| 5505 | KMemoryState::SharedCode, KMemoryPermission::UserReadWrite, | ||
| 5506 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); | ||
| 5507 | |||
| 5508 | // Check that the memory is mapped in the source process. | ||
| 5509 | R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess, | ||
| 5510 | KMemoryState::FlagCanMapProcess, KMemoryPermission::None, | ||
| 5511 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 5512 | KMemoryAttribute::None)); | ||
| 5513 | |||
| 5514 | // Validate that the memory ranges are compatible. | ||
| 5515 | { | ||
| 5516 | // Define a helper type. | ||
| 5517 | struct ContiguousRangeInfo { | ||
| 5518 | public: | ||
| 5519 | KPageTableBase& m_pt; | ||
| 5520 | TraversalContext m_context; | ||
| 5521 | TraversalEntry m_entry; | ||
| 5522 | KPhysicalAddress m_phys_addr; | ||
| 5523 | size_t m_cur_size; | ||
| 5524 | size_t m_remaining_size; | ||
| 5525 | |||
| 5526 | public: | ||
| 5527 | ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size) | ||
| 5528 | : m_pt(pt), m_remaining_size(size) { | ||
| 5529 | // Begin a traversal. | ||
| 5530 | ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry), | ||
| 5531 | std::addressof(m_context), address)); | ||
| 5532 | |||
| 5533 | // Set up tracking fields. | ||
| 5534 | m_phys_addr = m_entry.phys_addr; | ||
| 5535 | m_cur_size = std::min<size_t>( | ||
| 5536 | m_remaining_size, | ||
| 5537 | m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1))); | ||
| 5538 | |||
| 5539 | // Consume the whole contiguous block. | ||
| 5540 | this->DetermineContiguousBlockExtents(); | ||
| 5541 | } | ||
| 5542 | |||
| 5543 | void ContinueTraversal() { | ||
| 5544 | // Update our remaining size. | ||
| 5545 | m_remaining_size = m_remaining_size - m_cur_size; | ||
| 5546 | |||
| 5547 | // Update our tracking fields. | ||
| 5548 | if (m_remaining_size > 0) { | ||
| 5549 | m_phys_addr = m_entry.phys_addr; | ||
| 5550 | m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size); | ||
| 5551 | |||
| 5552 | // Consume the whole contiguous block. | ||
| 5553 | this->DetermineContiguousBlockExtents(); | ||
| 5554 | } | ||
| 5555 | } | ||
| 5556 | |||
| 5557 | private: | ||
| 5558 | void DetermineContiguousBlockExtents() { | ||
| 5559 | // Continue traversing until we're not contiguous, or we have enough. | ||
| 5560 | while (m_cur_size < m_remaining_size) { | ||
| 5561 | ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry), | ||
| 5562 | std::addressof(m_context))); | ||
| 5563 | |||
| 5564 | // If we're not contiguous, we're done. | ||
| 5565 | if (m_entry.phys_addr != m_phys_addr + m_cur_size) { | ||
| 5566 | break; | ||
| 5567 | } | ||
| 5568 | |||
| 5569 | // Update our current size. | ||
| 5570 | m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size); | ||
| 5571 | } | ||
| 5572 | } | ||
| 5573 | }; | ||
| 5574 | |||
| 5575 | // Create ranges for both tables. | ||
| 5576 | ContiguousRangeInfo src_range(src_page_table, src_address, size); | ||
| 5577 | ContiguousRangeInfo dst_range(dst_page_table, dst_address, size); | ||
| 5578 | |||
| 5579 | // Validate the ranges. | ||
| 5580 | while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) { | ||
| 5581 | R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion); | ||
| 5582 | R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion); | ||
| 5583 | |||
| 5584 | src_range.ContinueTraversal(); | ||
| 5585 | dst_range.ContinueTraversal(); | ||
| 5586 | } | ||
| 5587 | } | ||
| 5588 | |||
| 5589 | // We no longer need to hold our lock on the source page table. | ||
| 5590 | lk.TryUnlockHalf(src_page_table.m_general_lock); | ||
| 5591 | |||
| 5592 | // Create an update allocator. | ||
| 5593 | Result allocator_result; | ||
| 5594 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 5595 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 5596 | R_TRY(allocator_result); | ||
| 5597 | |||
| 5598 | // We're going to perform an update, so create a helper. | ||
| 5599 | KScopedPageTableUpdater updater(this); | ||
| 5600 | |||
| 5601 | // Unmap the memory. | ||
| 5602 | const size_t num_pages = size / PageSize; | ||
| 5603 | const KPageProperties unmap_properties = {KMemoryPermission::None, false, false, | ||
| 5604 | DisableMergeAttribute::None}; | ||
| 5605 | R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties, | ||
| 5606 | OperationType::Unmap, false)); | ||
| 5607 | |||
| 5608 | // Apply the memory block update. | ||
| 5609 | m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, | ||
| 5610 | KMemoryState::Free, KMemoryPermission::None, | ||
| 5611 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 5612 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 5613 | |||
| 5614 | R_SUCCEED(); | ||
| 5615 | } | ||
| 5616 | |||
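The validation above walks both tables in lock-step, comparing one maximal contiguous physical run at a time. The same idea expressed over plain vectors of (phys_addr, size) extents, a simplified stand-in for the traversal contexts:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Extent {
    std::uint64_t phys_addr;
    std::uint64_t size;
};

// Coalesce physically adjacent extents into maximal contiguous runs.
std::vector<Extent> Coalesce(const std::vector<Extent>& in) {
    std::vector<Extent> out;
    for (const Extent& e : in) {
        if (!out.empty() && out.back().phys_addr + out.back().size == e.phys_addr) {
            out.back().size += e.size; // contiguous: extend the current run
        } else {
            out.push_back(e);
        }
    }
    return out;
}

// Two mappings share the same backing iff their contiguous runs agree exactly.
bool SameBacking(const std::vector<Extent>& a, const std::vector<Extent>& b) {
    const auto ra = Coalesce(a);
    const auto rb = Coalesce(b);
    if (ra.size() != rb.size()) {
        return false;
    }
    for (std::size_t i = 0; i < ra.size(); ++i) {
        if (ra[i].phys_addr != rb[i].phys_addr || ra[i].size != rb[i].size) {
            return false; // ResultInvalidMemoryRegion in the real check
        }
    }
    return true;
}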
| 5617 | Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr, | ||
| 5618 | size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, | ||
| 5619 | const KPageProperties properties, OperationType operation, | ||
| 5620 | bool reuse_ll) { | ||
| 5621 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 5622 | ASSERT(num_pages > 0); | ||
| 5623 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); | ||
| 5624 | ASSERT(this->ContainsPages(virt_addr, num_pages)); | ||
| 5625 | |||
| 5626 | // As we don't allocate page entries in guest memory, we don't need to allocate them from | ||
| 5627 | // or free them to the page list, and so it goes unused (along with page properties). | ||
| 5628 | |||
| 5629 | switch (operation) { | ||
| 5630 | case OperationType::Unmap: { | ||
| 5631 | // Ensure that any pages we track are closed on exit. | ||
| 5632 | KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager()); | ||
| 5633 | SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); | ||
| 5634 | |||
| 5635 | // Make a page group representing the region to unmap. | ||
| 5636 | this->MakePageGroup(pages_to_close, virt_addr, num_pages); | ||
| 5637 | |||
| 5638 | // Unmap. | ||
| 5639 | m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize); | ||
| 5640 | |||
| 5641 | R_SUCCEED(); | ||
| 5642 | } | ||
| 5643 | case OperationType::Map: { | ||
| 5644 | ASSERT(virt_addr != 0); | ||
| 5645 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); | ||
| 5646 | m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr); | ||
| 5647 | |||
| 5648 | // Open references to pages, if we should. | ||
| 5649 | if (this->IsHeapPhysicalAddress(phys_addr)) { | ||
| 5650 | m_kernel.MemoryManager().Open(phys_addr, num_pages); | ||
| 5651 | } | ||
| 5652 | |||
| 5653 | R_SUCCEED(); | ||
| 5654 | } | ||
| 5655 | case OperationType::Separate: { | ||
| 5656 | // TODO: Unimplemented. | ||
| 5657 | R_SUCCEED(); | ||
| 5658 | } | ||
| 5659 | case OperationType::ChangePermissions: | ||
| 5660 | case OperationType::ChangePermissionsAndRefresh: | ||
| 5661 | case OperationType::ChangePermissionsAndRefreshAndFlush: | ||
| 5662 | R_SUCCEED(); | ||
| 5663 | default: | ||
| 5664 | UNREACHABLE(); | ||
| 5665 | } | ||
| 5666 | } | ||
| 5667 | |||
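Operate's unmap path uses SCOPE_EXIT, and the mapping paths use SCOPE_GUARD / ON_RESULT_FAILURE with a Cancel() call at the commit point, so page references are dropped on every early exit. A generic cancellable scope guard in that spirit (illustrative, not yuzu's actual macros):

#include <utility>

template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F f) : m_f(std::move(f)) {}
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;
    ~ScopeGuard() {
        if (m_active) {
            m_f(); // cleanup runs unless the commit point was reached
        }
    }
    void Cancel() {
        m_active = false; // success: skip the cleanup
    }

private:
    F m_f;
    bool m_active = true;
};

// Usage shaped like MapPhysicalMemory's pg_guard (FreePages/MapInto are
// hypothetical helpers):
//   ScopeGuard guard{[&] { FreePages(pg); }};
//   MapInto(table, pg); // any early return now frees the pages
//   guard.Cancel();     // mapping committed; ownership moved to the table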
| 5668 | Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr, | ||
| 5669 | size_t num_pages, const KPageGroup& page_group, | ||
| 5670 | const KPageProperties properties, OperationType operation, | ||
| 5671 | bool reuse_ll) { | ||
| 5672 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 5673 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); | ||
| 5674 | ASSERT(num_pages > 0); | ||
| 5675 | ASSERT(num_pages == page_group.GetNumPages()); | ||
| 5676 | |||
| 5677 | // As we don't allocate page entries in guest memory, we don't need to allocate them from | ||
| 5678 | // the page list, and so it goes unused (along with page properties). | ||
| 5679 | |||
| 5680 | switch (operation) { | ||
| 5681 | case OperationType::MapGroup: | ||
| 5682 | case OperationType::MapFirstGroup: { | ||
| 5683 | // We want to maintain a new reference to every page in the group. | ||
| 5684 | KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); | ||
| 5685 | |||
| 5686 | for (const auto& node : page_group) { | ||
| 5687 | const size_t size{node.GetNumPages() * PageSize}; | ||
| 5688 | |||
| 5689 | // Map the pages. | ||
| 5690 | m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress()); | ||
| 5691 | |||
| 5692 | virt_addr += size; | ||
| 5693 | } | ||
| 5694 | |||
| 5695 | // We succeeded! We want to persist the reference to the pages. | ||
| 5696 | spg.CancelClose(); | ||
| 5697 | |||
| 5698 | R_SUCCEED(); | ||
| 5699 | } | ||
| 5700 | default: | ||
| 5701 | UNREACHABLE(); | ||
| 5702 | } | ||
| 5703 | } | ||
| 5704 | |||
| 5705 | void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) { | ||
| 5706 | while (page_list->Peek()) { | ||
| 5707 | [[maybe_unused]] auto page = page_list->Pop(); | ||
| 5708 | |||
| 5709 | // TODO: Free page entries once they are allocated in guest memory. | ||
| 5710 | // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); | ||
| 5711 | // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); | ||
| 5712 | // this->GetPageTableManager().Free(page); | ||
| 5713 | } | ||
| 5714 | } | ||
| 5715 | |||
| 5716 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h new file mode 100644 index 000000000..ee2c41e67 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_base.h | |||
| @@ -0,0 +1,759 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <memory> | ||
| 7 | |||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "common/page_table.h" | ||
| 10 | #include "core/core.h" | ||
| 11 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 12 | #include "core/hle/kernel/k_light_lock.h" | ||
| 13 | #include "core/hle/kernel/k_memory_block.h" | ||
| 14 | #include "core/hle/kernel/k_memory_block_manager.h" | ||
| 15 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 16 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 17 | #include "core/hle/kernel/k_typed_address.h" | ||
| 18 | #include "core/hle/kernel/kernel.h" | ||
| 19 | #include "core/hle/result.h" | ||
| 20 | #include "core/memory.h" | ||
| 21 | |||
| 22 | namespace Kernel { | ||
| 23 | |||
| 24 | enum class DisableMergeAttribute : u8 { | ||
| 25 | None = (0U << 0), | ||
| 26 | |||
| 27 | DisableHead = (1U << 0), | ||
| 28 | DisableHeadAndBody = (1U << 1), | ||
| 29 | EnableHeadAndBody = (1U << 2), | ||
| 30 | DisableTail = (1U << 3), | ||
| 31 | EnableTail = (1U << 4), | ||
| 32 | EnableAndMergeHeadBodyTail = (1U << 5), | ||
| 33 | |||
| 34 | EnableHeadBodyTail = EnableHeadAndBody | EnableTail, | ||
| 35 | DisableHeadBodyTail = DisableHeadAndBody | DisableTail, | ||
| 36 | }; | ||
| 37 | DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute); | ||
| 38 | |||
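DECLARE_ENUM_FLAG_OPERATORS gives the enum class the bitwise operators used throughout the .cpp (e.g. head_body_attr | tail_attr). A hedged sketch of what such a macro typically provides, shown on a stand-in enum, together with a False() predicate in the spirit of the helper used above:

#include <cstdint>

// Illustrative expansion only; the real macro may differ in detail.
enum class Flags : std::uint8_t {
    None = 0,
    Head = 1 << 0,
    Tail = 1 << 1,
};

constexpr Flags operator|(Flags a, Flags b) {
    return static_cast<Flags>(static_cast<std::uint8_t>(a) | static_cast<std::uint8_t>(b));
}
constexpr Flags operator&(Flags a, Flags b) {
    return static_cast<Flags>(static_cast<std::uint8_t>(a) & static_cast<std::uint8_t>(b));
}
constexpr bool False(Flags f) {
    return static_cast<std::uint8_t>(f) == 0; // "no bits set" test
}

static_assert(!False((Flags::Head | Flags::Tail) & Flags::Head));
static_assert(False((Flags::Head | Flags::Tail) & static_cast<Flags>(1 << 2)));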
| 39 | struct KPageProperties { | ||
| 40 | KMemoryPermission perm; | ||
| 41 | bool io; | ||
| 42 | bool uncached; | ||
| 43 | DisableMergeAttribute disable_merge_attributes; | ||
| 44 | }; | ||
| 45 | static_assert(std::is_trivial_v<KPageProperties>); | ||
| 46 | static_assert(sizeof(KPageProperties) == sizeof(u32)); | ||
| 47 | |||
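The static_asserts pin KPageProperties to a trivially copyable four-byte value: four one-byte fields with no padding, so properties can be passed by value as cheaply as a u32. The same discipline on a stand-in struct (the u8 underlying types are assumptions about the real enums):

#include <cstdint>
#include <type_traits>

struct PagePropsSketch {
    std::uint8_t perm;                     // KMemoryPermission stand-in
    bool io;
    bool uncached;
    std::uint8_t disable_merge_attributes; // DisableMergeAttribute stand-in
};
static_assert(std::is_trivial_v<PagePropsSketch>);
static_assert(sizeof(PagePropsSketch) == sizeof(std::uint32_t)); // 4 x 1 byte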
| 48 | class KResourceLimit; | ||
| 49 | class KSystemResource; | ||
| 50 | |||
| 51 | class KPageTableBase { | ||
| 52 | YUZU_NON_COPYABLE(KPageTableBase); | ||
| 53 | YUZU_NON_MOVEABLE(KPageTableBase); | ||
| 54 | |||
| 55 | public: | ||
| 56 | using TraversalEntry = Common::PageTable::TraversalEntry; | ||
| 57 | using TraversalContext = Common::PageTable::TraversalContext; | ||
| 58 | |||
| 59 | class MemoryRange { | ||
| 60 | private: | ||
| 61 | KernelCore& m_kernel; | ||
| 62 | KPhysicalAddress m_address; | ||
| 63 | size_t m_size; | ||
| 64 | bool m_heap; | ||
| 65 | |||
| 66 | public: | ||
| 67 | explicit MemoryRange(KernelCore& kernel) | ||
| 68 | : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {} | ||
| 69 | |||
| 70 | void Set(KPhysicalAddress address, size_t size, bool heap) { | ||
| 71 | m_address = address; | ||
| 72 | m_size = size; | ||
| 73 | m_heap = heap; | ||
| 74 | } | ||
| 75 | |||
| 76 | KPhysicalAddress GetAddress() const { | ||
| 77 | return m_address; | ||
| 78 | } | ||
| 79 | size_t GetSize() const { | ||
| 80 | return m_size; | ||
| 81 | } | ||
| 82 | bool IsHeap() const { | ||
| 83 | return m_heap; | ||
| 84 | } | ||
| 85 | |||
| 86 | void Open(); | ||
| 87 | void Close(); | ||
| 88 | }; | ||
| 89 | |||
| 90 | protected: | ||
| 91 | enum MemoryFillValue : u8 { | ||
| 92 | MemoryFillValue_Zero = 0, | ||
| 93 | MemoryFillValue_Stack = 'X', | ||
| 94 | MemoryFillValue_Ipc = 'Y', | ||
| 95 | MemoryFillValue_Heap = 'Z', | ||
| 96 | }; | ||
| 97 | |||
| 98 | enum class OperationType { | ||
| 99 | Map = 0, | ||
| 100 | MapGroup = 1, | ||
| 101 | MapFirstGroup = 2, | ||
| 102 | Unmap = 3, | ||
| 103 | ChangePermissions = 4, | ||
| 104 | ChangePermissionsAndRefresh = 5, | ||
| 105 | ChangePermissionsAndRefreshAndFlush = 6, | ||
| 106 | Separate = 7, | ||
| 107 | }; | ||
| 108 | |||
| 109 | static constexpr size_t MaxPhysicalMapAlignment = 1_GiB; | ||
| 110 | static constexpr size_t RegionAlignment = 2_MiB; | ||
| 111 | static_assert(RegionAlignment == KernelAslrAlignment); | ||
| 112 | |||
| 113 | struct PageLinkedList { | ||
| 114 | private: | ||
| 115 | struct Node { | ||
| 116 | Node* m_next; | ||
| 117 | std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||
| 118 | }; | ||
| 119 | static_assert(std::is_trivial_v<Node>); | ||
| 120 | |||
| 121 | private: | ||
| 122 | Node* m_root{}; | ||
| 123 | |||
| 124 | public: | ||
| 125 | constexpr PageLinkedList() : m_root(nullptr) {} | ||
| 126 | |||
| 127 | void Push(Node* n) { | ||
| 128 | ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||
| 129 | n->m_next = m_root; | ||
| 130 | m_root = n; | ||
| 131 | } | ||
| 132 | |||
| 133 | Node* Peek() const { | ||
| 134 | return m_root; | ||
| 135 | } | ||
| 136 | |||
| 137 | Node* Pop() { | ||
| 138 | Node* const r = m_root; | ||
| 139 | |||
| 140 | m_root = r->m_next; | ||
| 141 | r->m_next = nullptr; | ||
| 142 | |||
| 143 | return r; | ||
| 144 | } | ||
| 145 | }; | ||
| 146 | static_assert(std::is_trivially_destructible_v<PageLinkedList>); | ||
| 147 | |||
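PageLinkedList threads the free list through the pages themselves: each page-sized node stores the next pointer in its own first bytes, so tracking free pages needs no side allocations. A self-contained demo of the same intrusive-stack idea, with PageSizeSketch standing in for the kernel's PageSize:

#include <cassert>
#include <cstddef>
#include <cstdlib>

constexpr std::size_t PageSizeSketch = 0x1000; // stand-in for PageSize

// Intrusive LIFO free list: the link lives inside the page being tracked.
struct Node {
    Node* next;
    // the remaining PageSizeSketch - sizeof(Node*) bytes are page payload
};

struct FreeList {
    Node* root = nullptr;

    void Push(void* page) {
        Node* n = static_cast<Node*>(page);
        n->next = root;
        root = n;
    }

    void* Pop() {
        Node* n = root;
        if (n != nullptr) {
            root = n->next;
        }
        return n;
    }
};

int main() {
    FreeList list;
    void* a = std::aligned_alloc(PageSizeSketch, PageSizeSketch);
    void* b = std::aligned_alloc(PageSizeSketch, PageSizeSketch);
    list.Push(a);
    list.Push(b);
    assert(list.Pop() == b); // LIFO, matching Push/Pop above
    assert(list.Pop() == a);
    std::free(a);
    std::free(b);
}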
| 148 | static constexpr auto DefaultMemoryIgnoreAttr = | ||
| 149 | KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; | ||
| 150 | |||
| 151 | static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) { | ||
| 152 | switch (static_cast<Svc::CreateProcessFlag>(as_type & | ||
| 153 | Svc::CreateProcessFlag::AddressSpaceMask)) { | ||
| 154 | case Svc::CreateProcessFlag::AddressSpace64Bit: | ||
| 155 | return 39; | ||
| 156 | case Svc::CreateProcessFlag::AddressSpace64BitDeprecated: | ||
| 157 | return 36; | ||
| 158 | case Svc::CreateProcessFlag::AddressSpace32Bit: | ||
| 159 | case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias: | ||
| 160 | return 32; | ||
| 161 | default: | ||
| 162 | UNREACHABLE(); | ||
| 163 | } | ||
| 164 | } | ||
| 165 | |||
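The width chosen here fixes the span the table manages: 1 << width bytes, i.e. 512 GiB for the 39-bit layout, 64 GiB for the deprecated 36-bit layout, and 4 GiB for 32-bit. A quick check of that arithmetic:

#include <cstdio>

int main() {
    for (const unsigned width : {39u, 36u, 32u}) {
        const unsigned long long bytes = 1ULL << width;
        std::printf("%u-bit address space: %llu GiB\n", width, bytes >> 30);
    }
    // 39 -> 512 GiB, 36 -> 64 GiB, 32 -> 4 GiB
}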
| 166 | private: | ||
| 167 | class KScopedPageTableUpdater { | ||
| 168 | private: | ||
| 169 | KPageTableBase* m_pt; | ||
| 170 | PageLinkedList m_ll; | ||
| 171 | |||
| 172 | public: | ||
| 173 | explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {} | ||
| 174 | explicit KScopedPageTableUpdater(KPageTableBase& pt) | ||
| 175 | : KScopedPageTableUpdater(std::addressof(pt)) {} | ||
| 176 | ~KScopedPageTableUpdater() { | ||
| 177 | m_pt->FinalizeUpdate(this->GetPageList()); | ||
| 178 | } | ||
| 179 | |||
| 180 | PageLinkedList* GetPageList() { | ||
| 181 | return std::addressof(m_ll); | ||
| 182 | } | ||
| 183 | }; | ||
| 184 | |||
| 185 | private: | ||
| 186 | KernelCore& m_kernel; | ||
| 187 | Core::System& m_system; | ||
| 188 | KProcessAddress m_address_space_start{}; | ||
| 189 | KProcessAddress m_address_space_end{}; | ||
| 190 | KProcessAddress m_heap_region_start{}; | ||
| 191 | KProcessAddress m_heap_region_end{}; | ||
| 192 | KProcessAddress m_current_heap_end{}; | ||
| 193 | KProcessAddress m_alias_region_start{}; | ||
| 194 | KProcessAddress m_alias_region_end{}; | ||
| 195 | KProcessAddress m_stack_region_start{}; | ||
| 196 | KProcessAddress m_stack_region_end{}; | ||
| 197 | KProcessAddress m_kernel_map_region_start{}; | ||
| 198 | KProcessAddress m_kernel_map_region_end{}; | ||
| 199 | KProcessAddress m_alias_code_region_start{}; | ||
| 200 | KProcessAddress m_alias_code_region_end{}; | ||
| 201 | KProcessAddress m_code_region_start{}; | ||
| 202 | KProcessAddress m_code_region_end{}; | ||
| 203 | size_t m_max_heap_size{}; | ||
| 204 | size_t m_mapped_physical_memory_size{}; | ||
| 205 | size_t m_mapped_unsafe_physical_memory{}; | ||
| 206 | size_t m_mapped_insecure_memory{}; | ||
| 207 | size_t m_mapped_ipc_server_memory{}; | ||
| 208 | mutable KLightLock m_general_lock; | ||
| 209 | mutable KLightLock m_map_physical_memory_lock; | ||
| 210 | KLightLock m_device_map_lock; | ||
| 211 | std::unique_ptr<Common::PageTable> m_impl{}; | ||
| 212 | Core::Memory::Memory* m_memory{}; | ||
| 213 | KMemoryBlockManager m_memory_block_manager{}; | ||
| 214 | u32 m_allocate_option{}; | ||
| 215 | u32 m_address_space_width{}; | ||
| 216 | bool m_is_kernel{}; | ||
| 217 | bool m_enable_aslr{}; | ||
| 218 | bool m_enable_device_address_space_merge{}; | ||
| 219 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | ||
| 220 | KBlockInfoManager* m_block_info_manager{}; | ||
| 221 | KResourceLimit* m_resource_limit{}; | ||
| 222 | const KMemoryRegion* m_cached_physical_linear_region{}; | ||
| 223 | const KMemoryRegion* m_cached_physical_heap_region{}; | ||
| 224 | MemoryFillValue m_heap_fill_value{}; | ||
| 225 | MemoryFillValue m_ipc_fill_value{}; | ||
| 226 | MemoryFillValue m_stack_fill_value{}; | ||
| 227 | |||
| 228 | public: | ||
| 229 | explicit KPageTableBase(KernelCore& kernel); | ||
| 230 | ~KPageTableBase(); | ||
| 231 | |||
| 232 | Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end, | ||
| 233 | Core::Memory::Memory& memory); | ||
| 234 | Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr, | ||
| 235 | bool enable_device_address_space_merge, bool from_back, | ||
| 236 | KMemoryManager::Pool pool, KProcessAddress code_address, | ||
| 237 | size_t code_size, KSystemResource* system_resource, | ||
| 238 | KResourceLimit* resource_limit, Core::Memory::Memory& memory); | ||
| 239 | |||
| 240 | void Finalize(); | ||
| 241 | |||
| 242 | bool IsKernel() const { | ||
| 243 | return m_is_kernel; | ||
| 244 | } | ||
| 245 | bool IsAslrEnabled() const { | ||
| 246 | return m_enable_aslr; | ||
| 247 | } | ||
| 248 | |||
| 249 | bool Contains(KProcessAddress addr) const { | ||
| 250 | return m_address_space_start <= addr && addr <= m_address_space_end - 1; | ||
| 251 | } | ||
| 252 | |||
| 253 | bool Contains(KProcessAddress addr, size_t size) const { | ||
| 254 | return m_address_space_start <= addr && addr < addr + size && | ||
| 255 | addr + size - 1 <= m_address_space_end - 1; | ||
| 256 | } | ||
| 257 | |||
| 258 | bool IsInAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 259 | return this->Contains(addr, size) && m_alias_region_start <= addr && | ||
| 260 | addr + size - 1 <= m_alias_region_end - 1; | ||
| 261 | } | ||
| 262 | |||
| 263 | bool IsInHeapRegion(KProcessAddress addr, size_t size) const { | ||
| 264 | return this->Contains(addr, size) && m_heap_region_start <= addr && | ||
| 265 | addr + size - 1 <= m_heap_region_end - 1; | ||
| 266 | } | ||
| 267 | |||
| 268 | bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 269 | // Even though Unsafe physical memory is KMemoryState::Normal, it must be mapped inside the | ||
| 270 | // alias code region. | ||
| 271 | return this->CanContain(addr, size, Svc::MemoryState::AliasCode); | ||
| 272 | } | ||
| 273 | |||
| 274 | KScopedLightLock AcquireDeviceMapLock() { | ||
| 275 | return KScopedLightLock(m_device_map_lock); | ||
| 276 | } | ||
| 277 | |||
| 278 | KProcessAddress GetRegionAddress(Svc::MemoryState state) const; | ||
| 279 | size_t GetRegionSize(Svc::MemoryState state) const; | ||
| 280 | bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const; | ||
| 281 | |||
| 282 | KProcessAddress GetRegionAddress(KMemoryState state) const { | ||
| 283 | return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 284 | } | ||
| 285 | size_t GetRegionSize(KMemoryState state) const { | ||
| 286 | return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 287 | } | ||
| 288 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | ||
| 289 | return this->CanContain(addr, size, | ||
| 290 | static_cast<Svc::MemoryState>(state & KMemoryState::Mask)); | ||
| 291 | } | ||
| 292 | |||
| 293 | public: | ||
| 294 | Core::Memory::Memory& GetMemory() { | ||
| 295 | return *m_memory; | ||
| 296 | } | ||
| 297 | |||
| 298 | Core::Memory::Memory& GetMemory() const { | ||
| 299 | return *m_memory; | ||
| 300 | } | ||
| 301 | |||
| 302 | Common::PageTable& GetImpl() { | ||
| 303 | return *m_impl; | ||
| 304 | } | ||
| 305 | |||
| 306 | Common::PageTable& GetImpl() const { | ||
| 307 | return *m_impl; | ||
| 308 | } | ||
| 309 | |||
| 310 | size_t GetNumGuardPages() const { | ||
| 311 | return this->IsKernel() ? 1 : 4; | ||
| 312 | } | ||
| 313 | |||
| 314 | protected: | ||
| 315 | // NOTE: These three functions (the two Operate overloads and FinalizeUpdate) are | ||
| 316 | // virtual in Nintendo's kernel. We devirtualize them, since KPageTable is the only | ||
| 317 | // derived class, and this avoids unnecessary virtual function calls. | ||
| 318 | Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages, | ||
| 319 | KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, | ||
| 320 | OperationType operation, bool reuse_ll); | ||
| 321 | Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages, | ||
| 322 | const KPageGroup& page_group, const KPageProperties properties, | ||
| 323 | OperationType operation, bool reuse_ll); | ||
| 324 | void FinalizeUpdate(PageLinkedList* page_list); | ||
| 325 | |||
| 326 | bool IsLockedByCurrentThread() const { | ||
| 327 | return m_general_lock.IsLockedByCurrentThread(); | ||
| 328 | } | ||
| 329 | |||
| 330 | bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) { | ||
| 331 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 332 | |||
| 333 | return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress( | ||
| 334 | m_cached_physical_linear_region, phys_addr); | ||
| 335 | } | ||
| 336 | |||
| 337 | bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { | ||
| 338 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 339 | |||
| 340 | return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress( | ||
| 341 | m_cached_physical_linear_region, phys_addr, size); | ||
| 342 | } | ||
| 343 | |||
| 344 | bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) { | ||
| 345 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 346 | |||
| 347 | return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, | ||
| 348 | phys_addr); | ||
| 349 | } | ||
| 350 | |||
| 351 | bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { | ||
| 352 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 353 | |||
| 354 | return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, | ||
| 355 | phys_addr, size); | ||
| 356 | } | ||
| 357 | |||
| 358 | bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) { | ||
| 359 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 360 | |||
| 361 | return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region, | ||
| 362 | phys_addr); | ||
| 363 | } | ||
| 364 | |||
| 365 | bool ContainsPages(KProcessAddress addr, size_t num_pages) const { | ||
| 366 | return (m_address_space_start <= addr) && | ||
| 367 | (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && | ||
| 368 | (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); | ||
| 369 | } | ||
| 370 | |||
| 371 | private: | ||
| 372 | KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, | ||
| 373 | size_t num_pages, size_t alignment, size_t offset, | ||
| 374 | size_t guard_pages) const; | ||
| 375 | |||
| 376 | Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 377 | KMemoryState state_mask, KMemoryState state, | ||
| 378 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 379 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 380 | Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 381 | KMemoryState state, KMemoryPermission perm_mask, | ||
| 382 | KMemoryPermission perm, KMemoryAttribute attr_mask, | ||
| 383 | KMemoryAttribute attr) const { | ||
| 384 | R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, | ||
| 385 | perm, attr_mask, attr)); | ||
| 386 | } | ||
| 387 | |||
| 388 | Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, | ||
| 389 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 390 | KMemoryAttribute attr_mask, KMemoryAttribute attr) const; | ||
| 391 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 392 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 393 | KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, | ||
| 394 | KMemoryState state_mask, KMemoryState state, | ||
| 395 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 396 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 397 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 398 | Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, | ||
| 399 | KMemoryAttribute* out_attr, size_t* out_blocks_needed, | ||
| 400 | KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 401 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 402 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 403 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; | ||
| 404 | Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size, | ||
| 405 | KMemoryState state_mask, KMemoryState state, | ||
| 406 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 407 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 408 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 409 | R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, | ||
| 410 | state_mask, state, perm_mask, perm, attr_mask, attr, | ||
| 411 | ignore_attr)); | ||
| 412 | } | ||
| 413 | Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 414 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 415 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 416 | KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { | ||
| 417 | R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, | ||
| 418 | attr_mask, attr, ignore_attr)); | ||
| 419 | } | ||
| 420 | |||
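Every CheckMemoryState overload reduces to the same per-block mask/value test: a field matches when (field & mask) == value, with some attribute bits excluded via ignore_attr. The core predicate sketched on plain integers (the ignore handling is a simplified reading of the real check):

#include <cstdint>

constexpr bool Matches(std::uint32_t field, std::uint32_t mask, std::uint32_t value,
                       std::uint32_t ignore_mask = 0) {
    return (field & mask & ~ignore_mask) == (value & ~ignore_mask);
}

static_assert(Matches(0b1010, 0b1111, 0b1010));         // exact match
static_assert(Matches(0b1011, 0b1111, 0b1010, 0b0001)); // differs only in an ignored bit
static_assert(!Matches(0b1110, 0b1111, 0b1010));        // mismatch in tested bits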
| 421 | Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr, | ||
| 422 | size_t size, KMemoryState state_mask, KMemoryState state, | ||
| 423 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 424 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 425 | KMemoryPermission new_perm, KMemoryAttribute lock_attr); | ||
| 426 | Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask, | ||
| 427 | KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 428 | KMemoryAttribute attr_mask, KMemoryAttribute attr, | ||
| 429 | KMemoryPermission new_perm, KMemoryAttribute lock_attr, | ||
| 430 | const KPageGroup* pg); | ||
| 431 | |||
| 432 | Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page, | ||
| 433 | KProcessAddress address) const; | ||
| 434 | |||
| 435 | Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size, | ||
| 436 | Svc::MemoryState state) const; | ||
| 437 | |||
| 438 | Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 439 | size_t num_pages, KMemoryPermission perm); | ||
| 440 | Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address, | ||
| 441 | const KPageGroup& pg, const KPageProperties properties, bool reuse_ll); | ||
| 442 | |||
| 443 | void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size, | ||
| 444 | const KPageGroup& pg); | ||
| 445 | |||
| 446 | Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 447 | bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages); | ||
| 448 | |||
| 449 | Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size, | ||
| 450 | KMemoryState state_mask, KMemoryState state, | ||
| 451 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 452 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | ||
| 453 | |||
| 454 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 455 | KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, | ||
| 456 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm); | ||
| 457 | |||
| 458 | Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr, | ||
| 459 | size_t size, KMemoryState state, KMemoryPermission perm); | ||
| 460 | Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size, | ||
| 461 | KMemoryState state); | ||
| 462 | Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size, | ||
| 463 | KMemoryState state); | ||
| 464 | |||
| 465 | Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 466 | KProcessAddress address, size_t size, KMemoryPermission test_perm, | ||
| 467 | KMemoryState dst_state); | ||
| 468 | Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr, | ||
| 469 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 470 | KPageTableBase& src_page_table, bool send); | ||
| 471 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address, | ||
| 472 | size_t size, KMemoryPermission prot_perm); | ||
| 473 | |||
| 474 | size_t GetSize(KMemoryState state) const; | ||
| 475 | |||
| 476 | bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const { | ||
| 477 | // Validate pre-conditions. | ||
| 478 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 479 | |||
| 480 | return this->GetImpl().GetPhysicalAddress(out, virt_addr); | ||
| 481 | } | ||
| 482 | |||
| 483 | public: | ||
| 484 | bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const { | ||
| 485 | // Validate pre-conditions. | ||
| 486 | ASSERT(!this->IsLockedByCurrentThread()); | ||
| 487 | |||
| 488 | // Acquire exclusive access to the table while doing address translation. | ||
| 489 | KScopedLightLock lk(m_general_lock); | ||
| 490 | |||
| 491 | return this->GetPhysicalAddressLocked(out, virt_addr); | ||
| 492 | } | ||
| 493 | |||
| 494 | KBlockInfoManager* GetBlockInfoManager() const { | ||
| 495 | return m_block_info_manager; | ||
| 496 | } | ||
| 497 | |||
| 498 | Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm); | ||
| 499 | Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 500 | Svc::MemoryPermission perm); | ||
| 501 | Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, | ||
| 502 | KMemoryAttribute attr); | ||
| 503 | Result SetHeapSize(KProcessAddress* out, size_t size); | ||
| 504 | Result SetMaxHeapSize(size_t size); | ||
| 505 | Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, | ||
| 506 | KProcessAddress addr) const; | ||
| 507 | Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const; | ||
| 508 | Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const { | ||
| 509 | R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static)); | ||
| 510 | } | ||
| 511 | Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const { | ||
| 512 | R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io)); | ||
| 513 | } | ||
| 514 | Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 515 | Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 516 | Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 517 | Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 518 | Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); | ||
| 519 | Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 520 | Svc::MemoryMapping mapping, Svc::MemoryPermission perm); | ||
| 521 | Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 522 | Svc::MemoryMapping mapping); | ||
| 523 | Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); | ||
| 524 | Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm); | ||
| 525 | Result MapInsecureMemory(KProcessAddress address, size_t size); | ||
| 526 | Result UnmapInsecureMemory(KProcessAddress address, size_t size); | ||
| 527 | |||
| 528 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 529 | KPhysicalAddress phys_addr, KProcessAddress region_start, | ||
| 530 | size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { | ||
| 531 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, | ||
| 532 | region_num_pages, state, perm)); | ||
| 533 | } | ||
| 534 | |||
| 535 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 536 | KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { | ||
| 537 | R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | ||
| 538 | this->GetRegionAddress(state), | ||
| 539 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 540 | } | ||
| 541 | |||
| 542 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, | ||
| 543 | KMemoryPermission perm) { | ||
| 544 | R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false, | ||
| 545 | this->GetRegionAddress(state), | ||
| 546 | this->GetRegionSize(state) / PageSize, state, perm)); | ||
| 547 | } | ||
| 548 | |||
| 549 | Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 550 | KMemoryPermission perm); | ||
| 551 | Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); | ||
| 552 | |||
| 553 | Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg, | ||
| 554 | KProcessAddress region_start, size_t region_num_pages, KMemoryState state, | ||
| 555 | KMemoryPermission perm); | ||
| 556 | Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state, | ||
| 557 | KMemoryPermission perm); | ||
| 558 | Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state); | ||
| 559 | |||
| 560 | Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 561 | KMemoryState state_mask, KMemoryState state, | ||
| 562 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 563 | KMemoryAttribute attr_mask, KMemoryAttribute attr); | ||
| 564 | |||
| 565 | Result InvalidateProcessDataCache(KProcessAddress address, size_t size); | ||
| 566 | Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size); | ||
| 567 | |||
| 568 | Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 569 | Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 570 | KMemoryState state); | ||
| 571 | |||
| 572 | Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); | ||
| 573 | Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 574 | KMemoryState state); | ||
| 575 | |||
| 576 | Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, | ||
| 577 | KMemoryPermission perm, bool is_aligned, bool check_heap); | ||
| 578 | Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap); | ||
| 579 | |||
| 580 | Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size); | ||
| 581 | Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size); | ||
| 582 | |||
| 583 | Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 584 | KProcessAddress address, size_t size, | ||
| 585 | KMemoryPermission perm, bool is_aligned); | ||
| 586 | Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address, | ||
| 587 | size_t size); | ||
| 588 | |||
| 589 | Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size); | ||
| 590 | Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size); | ||
| 591 | |||
| 592 | Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 593 | KMemoryPermission perm); | ||
| 594 | Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg); | ||
| 595 | Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size); | ||
| 596 | Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg); | ||
| 597 | |||
| 598 | Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address, | ||
| 599 | size_t size); | ||
| 600 | |||
| 601 | Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, | ||
| 602 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 603 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 604 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 605 | Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr, | ||
| 606 | KMemoryState src_state_mask, KMemoryState src_state, | ||
| 607 | KMemoryPermission src_test_perm, | ||
| 608 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 609 | Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, | ||
| 610 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 611 | KMemoryPermission dst_test_perm, | ||
| 612 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 613 | KProcessAddress src_addr); | ||
| 614 | Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, | ||
| 615 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 616 | KMemoryPermission dst_test_perm, | ||
| 617 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 618 | void* buffer); | ||
| 619 | Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr, | ||
| 620 | size_t size, KMemoryState dst_state_mask, | ||
| 621 | KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 622 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 623 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 624 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 625 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 626 | Result CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 627 | KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 628 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 629 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 630 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 631 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr); | ||
| 632 | |||
| 633 | Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 634 | KPageTableBase& src_page_table, KMemoryPermission test_perm, | ||
| 635 | KMemoryState dst_state, bool send); | ||
| 636 | Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 637 | Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state); | ||
| 638 | |||
| 639 | Result MapPhysicalMemory(KProcessAddress address, size_t size); | ||
| 640 | Result UnmapPhysicalMemory(KProcessAddress address, size_t size); | ||
| 641 | |||
| 642 | Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); | ||
| 643 | Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); | ||
| 644 | |||
| 645 | Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt, | ||
| 646 | KProcessAddress src_address); | ||
| 647 | |||
| 648 | public: | ||
| 649 | KProcessAddress GetAddressSpaceStart() const { | ||
| 650 | return m_address_space_start; | ||
| 651 | } | ||
| 652 | KProcessAddress GetHeapRegionStart() const { | ||
| 653 | return m_heap_region_start; | ||
| 654 | } | ||
| 655 | KProcessAddress GetAliasRegionStart() const { | ||
| 656 | return m_alias_region_start; | ||
| 657 | } | ||
| 658 | KProcessAddress GetStackRegionStart() const { | ||
| 659 | return m_stack_region_start; | ||
| 660 | } | ||
| 661 | KProcessAddress GetKernelMapRegionStart() const { | ||
| 662 | return m_kernel_map_region_start; | ||
| 663 | } | ||
| 664 | KProcessAddress GetCodeRegionStart() const { | ||
| 665 | return m_code_region_start; | ||
| 666 | } | ||
| 667 | KProcessAddress GetAliasCodeRegionStart() const { | ||
| 668 | return m_alias_code_region_start; | ||
| 669 | } | ||
| 670 | |||
| 671 | size_t GetAddressSpaceSize() const { | ||
| 672 | return m_address_space_end - m_address_space_start; | ||
| 673 | } | ||
| 674 | size_t GetHeapRegionSize() const { | ||
| 675 | return m_heap_region_end - m_heap_region_start; | ||
| 676 | } | ||
| 677 | size_t GetAliasRegionSize() const { | ||
| 678 | return m_alias_region_end - m_alias_region_start; | ||
| 679 | } | ||
| 680 | size_t GetStackRegionSize() const { | ||
| 681 | return m_stack_region_end - m_stack_region_start; | ||
| 682 | } | ||
| 683 | size_t GetKernelMapRegionSize() const { | ||
| 684 | return m_kernel_map_region_end - m_kernel_map_region_start; | ||
| 685 | } | ||
| 686 | size_t GetCodeRegionSize() const { | ||
| 687 | return m_code_region_end - m_code_region_start; | ||
| 688 | } | ||
| 689 | size_t GetAliasCodeRegionSize() const { | ||
| 690 | return m_alias_code_region_end - m_alias_code_region_start; | ||
| 691 | } | ||
| 692 | |||
| 693 | size_t GetNormalMemorySize() const { | ||
| 694 | // Lock the table. | ||
| 695 | KScopedLightLock lk(m_general_lock); | ||
| 696 | |||
| 697 | return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size; | ||
| 698 | } | ||
| 699 | |||
| 700 | size_t GetCodeSize() const; | ||
| 701 | size_t GetCodeDataSize() const; | ||
| 702 | size_t GetAliasCodeSize() const; | ||
| 703 | size_t GetAliasCodeDataSize() const; | ||
| 704 | |||
| 705 | u32 GetAllocateOption() const { | ||
| 706 | return m_allocate_option; | ||
| 707 | } | ||
| 708 | |||
| 709 | u32 GetAddressSpaceWidth() const { | ||
| 710 | return m_address_space_width; | ||
| 711 | } | ||
| 712 | |||
| 713 | public: | ||
| 714 | // Linear mapped | ||
| 715 | static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) { | ||
| 716 | return kernel.System().DeviceMemory().GetPointer<u8>(addr); | ||
| 717 | } | ||
| 718 | |||
| 719 | static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel, | ||
| 720 | KVirtualAddress addr) { | ||
| 721 | return kernel.MemoryLayout().GetLinearPhysicalAddress(addr); | ||
| 722 | } | ||
| 723 | |||
| 724 | static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel, | ||
| 725 | KPhysicalAddress addr) { | ||
| 726 | return kernel.MemoryLayout().GetLinearVirtualAddress(addr); | ||
| 727 | } | ||
| 728 | |||
| 729 | // Heap | ||
| 730 | static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) { | ||
| 731 | return kernel.System().DeviceMemory().GetPointer<u8>(addr); | ||
| 732 | } | ||
| 733 | |||
| 734 | static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) { | ||
| 735 | return GetLinearMappedPhysicalAddress(kernel, addr); | ||
| 736 | } | ||
| 737 | |||
| 738 | static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) { | ||
| 739 | return GetLinearMappedVirtualAddress(kernel, addr); | ||
| 740 | } | ||
| 741 | |||
| 742 | // Member heap | ||
| 743 | u8* GetHeapVirtualPointer(KPhysicalAddress addr) { | ||
| 744 | return GetHeapVirtualPointer(m_kernel, addr); | ||
| 745 | } | ||
| 746 | |||
| 747 | KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) { | ||
| 748 | return GetHeapPhysicalAddress(m_kernel, addr); | ||
| 749 | } | ||
| 750 | |||
| 751 | KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) { | ||
| 752 | return GetHeapVirtualAddress(m_kernel, addr); | ||
| 753 | } | ||
| 754 | |||
| 755 | // TODO: GetPageTableVirtualAddress | ||
| 756 | // TODO: GetPageTablePhysicalAddress | ||
| 757 | }; | ||
| 758 | |||
| 759 | } // namespace Kernel | ||
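Editor's note: the three MapPages overloads in this header funnel into one private worker, with the thinner overloads deriving the search region from the memory state via GetRegionAddress/GetRegionSize. A minimal sketch of that overload-funnel pattern, using hypothetical names and a trivial placement policy rather than yuzu's real allocator:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;

struct Mapper {
    std::uintptr_t region_start = 0x10000;   // stand-in default region
    std::size_t region_size = 16 * PageSize; // stand-in region size

    // Single worker that every overload funnels into.
    bool MapPages(std::uintptr_t* out, std::size_t num_pages,
                  std::uintptr_t start, std::size_t region_pages) {
        if (num_pages > region_pages) {
            return false;
        }
        *out = start; // trivial placement; the real table searches for a fit
        return true;
    }

    // Convenience overload: fill in the region defaults, then delegate.
    bool MapPages(std::uintptr_t* out, std::size_t num_pages) {
        return MapPages(out, num_pages, region_start, region_size / PageSize);
    }
};

int main() {
    Mapper m;
    std::uintptr_t addr{};
    if (m.MapPages(&addr, 4)) {
        std::printf("mapped at %#lx\n", static_cast<unsigned long>(addr));
    }
}
```

Funneling into one worker keeps validation and locking in a single place, which matches how the header routes all variants through the overload taking the explicit region and is_pa_valid arguments.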
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 1f4b0755d..3cfb414e5 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -298,9 +298,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa | |||
| 298 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); | 298 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); |
| 299 | const bool enable_das_merge = | 299 | const bool enable_das_merge = |
| 300 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); | 300 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); |
| 301 | R_TRY(m_page_table.InitializeForProcess( | 301 | R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, |
| 302 | as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, | 302 | params.code_address, params.code_num_pages * PageSize, |
| 303 | params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory())); | 303 | m_system_resource, res_limit, this->GetMemory())); |
| 304 | } | 304 | } |
| 305 | ON_RESULT_FAILURE_2 { | 305 | ON_RESULT_FAILURE_2 { |
| 306 | m_page_table.Finalize(); | 306 | m_page_table.Finalize(); |
| @@ -391,9 +391,9 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, | |||
| 391 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); | 391 | const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr); |
| 392 | const bool enable_das_merge = | 392 | const bool enable_das_merge = |
| 393 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); | 393 | False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge); |
| 394 | R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, | 394 | R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, |
| 395 | !enable_aslr, pool, params.code_address, code_size, | 395 | params.code_address, code_size, m_system_resource, res_limit, |
| 396 | m_system_resource, res_limit, this->GetMemory())); | 396 | this->GetMemory())); |
| 397 | } | 397 | } |
| 398 | ON_RESULT_FAILURE_2 { | 398 | ON_RESULT_FAILURE_2 { |
| 399 | m_page_table.Finalize(); | 399 | m_page_table.Finalize(); |
| @@ -1122,9 +1122,9 @@ Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ | |||
| 1122 | void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {} | 1122 | void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {} |
| 1123 | 1123 | ||
| 1124 | KProcess::KProcess(KernelCore& kernel) | 1124 | KProcess::KProcess(KernelCore& kernel) |
| 1125 | : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()}, | 1125 | : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel}, |
| 1126 | m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()}, | 1126 | m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()}, |
| 1127 | m_address_arbiter{kernel.System()}, m_handle_table{kernel} {} | 1127 | m_handle_table{kernel} {} |
| 1128 | KProcess::~KProcess() = default; | 1128 | KProcess::~KProcess() = default; |
| 1129 | 1129 | ||
| 1130 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, | 1130 | Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, |
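Editor's note: both rewritten hunks lean on R_TRY to early-return on failure from the page-table initialization. A rough sketch of the idea only (yuzu's actual Result type and the ON_RESULT_FAILURE_2 cleanup hook are more involved than this), assuming a simplified integer-coded Result:

```cpp
#include <cstdio>

struct Result {
    int code;
    bool IsError() const { return code != 0; }
};
constexpr Result ResultSuccess{0};

// R_TRY-style macro: evaluate an expression yielding a Result and
// propagate it to the caller immediately on failure.
#define R_TRY(expr)                \
    do {                           \
        const Result r_ = (expr);  \
        if (r_.IsError()) {        \
            return r_;             \
        }                          \
    } while (false)

Result InitializePageTable() { return ResultSuccess; }
Result ReserveResources() { return Result{2}; } // simulated failure

Result InitializeProcess() {
    R_TRY(InitializePageTable()); // succeeds, execution continues
    R_TRY(ReserveResources());    // fails, so this returns Result{2}
    return ResultSuccess;
}

int main() {
    std::printf("code=%d\n", InitializeProcess().code); // prints code=2
}
```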
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index f9f755afa..8339465fd 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h | |||
| @@ -5,13 +5,14 @@ | |||
| 5 | 5 | ||
| 6 | #include <map> | 6 | #include <map> |
| 7 | 7 | ||
| 8 | #include "core/file_sys/program_metadata.h" | ||
| 8 | #include "core/hle/kernel/code_set.h" | 9 | #include "core/hle/kernel/code_set.h" |
| 9 | #include "core/hle/kernel/k_address_arbiter.h" | 10 | #include "core/hle/kernel/k_address_arbiter.h" |
| 10 | #include "core/hle/kernel/k_capabilities.h" | 11 | #include "core/hle/kernel/k_capabilities.h" |
| 11 | #include "core/hle/kernel/k_condition_variable.h" | 12 | #include "core/hle/kernel/k_condition_variable.h" |
| 12 | #include "core/hle/kernel/k_handle_table.h" | 13 | #include "core/hle/kernel/k_handle_table.h" |
| 13 | #include "core/hle/kernel/k_page_table.h" | ||
| 14 | #include "core/hle/kernel/k_page_table_manager.h" | 14 | #include "core/hle/kernel/k_page_table_manager.h" |
| 15 | #include "core/hle/kernel/k_process_page_table.h" | ||
| 15 | #include "core/hle/kernel/k_system_resource.h" | 16 | #include "core/hle/kernel/k_system_resource.h" |
| 16 | #include "core/hle/kernel/k_thread.h" | 17 | #include "core/hle/kernel/k_thread.h" |
| 17 | #include "core/hle/kernel/k_thread_local_page.h" | 18 | #include "core/hle/kernel/k_thread_local_page.h" |
| @@ -65,7 +66,7 @@ private: | |||
| 65 | using TLPIterator = TLPTree::iterator; | 66 | using TLPIterator = TLPTree::iterator; |
| 66 | 67 | ||
| 67 | private: | 68 | private: |
| 68 | KPageTable m_page_table; | 69 | KProcessPageTable m_page_table; |
| 69 | std::atomic<size_t> m_used_kernel_memory_size{}; | 70 | std::atomic<size_t> m_used_kernel_memory_size{}; |
| 70 | TLPTree m_fully_used_tlp_tree{}; | 71 | TLPTree m_fully_used_tlp_tree{}; |
| 71 | TLPTree m_partially_used_tlp_tree{}; | 72 | TLPTree m_partially_used_tlp_tree{}; |
| @@ -254,9 +255,8 @@ public: | |||
| 254 | return m_is_hbl; | 255 | return m_is_hbl; |
| 255 | } | 256 | } |
| 256 | 257 | ||
| 257 | Kernel::KMemoryManager::Direction GetAllocateOption() const { | 258 | u32 GetAllocateOption() const { |
| 258 | // TODO: property of the KPageTableBase | 259 | return m_page_table.GetAllocateOption(); |
| 259 | return KMemoryManager::Direction::FromFront; | ||
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | ThreadList& GetThreadList() { | 262 | ThreadList& GetThreadList() { |
| @@ -295,10 +295,10 @@ public: | |||
| 295 | return m_list_lock; | 295 | return m_list_lock; |
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | KPageTable& GetPageTable() { | 298 | KProcessPageTable& GetPageTable() { |
| 299 | return m_page_table; | 299 | return m_page_table; |
| 300 | } | 300 | } |
| 301 | const KPageTable& GetPageTable() const { | 301 | const KProcessPageTable& GetPageTable() const { |
| 302 | return m_page_table; | 302 | return m_page_table; |
| 303 | } | 303 | } |
| 304 | 304 | ||
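Editor's note: swapping the KPageTable member for KProcessPageTable turns the process's table into a facade; the raw table becomes an implementation detail while callers see a curated interface, as the reworked GetAllocateOption illustrates. A minimal sketch of that shape, with hypothetical class names and a stand-in value:

```cpp
#include <cstdio>

class InnerTable {
public:
    unsigned GetAllocateOption() const { return 0; } // stand-in value
};

// Facade: owns the inner table privately and forwards a chosen subset.
class ProcessTableFacade {
    InnerTable m_table;
public:
    unsigned GetAllocateOption() const { return m_table.GetAllocateOption(); }
};

class Process {
    ProcessTableFacade m_page_table;
public:
    // Callers go through the facade rather than the raw table.
    unsigned GetAllocateOption() const { return m_page_table.GetAllocateOption(); }
};

int main() {
    Process p;
    std::printf("allocate option: %u\n", p.GetAllocateOption());
}
```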
diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h new file mode 100644 index 000000000..b7ae5abd0 --- /dev/null +++ b/src/core/hle/kernel/k_process_page_table.h | |||
| @@ -0,0 +1,480 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "core/hle/kernel/k_page_table.h" | ||
| 7 | #include "core/hle/kernel/k_scoped_lock.h" | ||
| 8 | #include "core/hle/kernel/svc_types.h" | ||
| 9 | |||
| 10 | namespace Core { | ||
| 11 | class ARM_Interface; | ||
| 12 | } | ||
| 13 | |||
| 14 | namespace Kernel { | ||
| 15 | |||
| 16 | class KProcessPageTable { | ||
| 17 | private: | ||
| 18 | KPageTable m_page_table; | ||
| 19 | |||
| 20 | public: | ||
| 21 | KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {} | ||
| 22 | |||
| 23 | Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, | ||
| 24 | bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, | ||
| 25 | size_t code_size, KSystemResource* system_resource, | ||
| 26 | KResourceLimit* resource_limit, Core::Memory::Memory& memory) { | ||
| 27 | R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, | ||
| 28 | from_back, pool, code_address, code_size, | ||
| 29 | system_resource, resource_limit, memory)); | ||
| 30 | } | ||
| 31 | |||
| 32 | void Finalize() { | ||
| 33 | m_page_table.Finalize(); | ||
| 34 | } | ||
| 35 | |||
| 36 | Core::Memory::Memory& GetMemory() { | ||
| 37 | return m_page_table.GetMemory(); | ||
| 38 | } | ||
| 39 | |||
| 40 | Core::Memory::Memory& GetMemory() const { | ||
| 41 | return m_page_table.GetMemory(); | ||
| 42 | } | ||
| 43 | |||
| 44 | Common::PageTable& GetImpl() { | ||
| 45 | return m_page_table.GetImpl(); | ||
| 46 | } | ||
| 47 | |||
| 48 | Common::PageTable& GetImpl() const { | ||
| 49 | return m_page_table.GetImpl(); | ||
| 50 | } | ||
| 51 | |||
| 52 | size_t GetNumGuardPages() const { | ||
| 53 | return m_page_table.GetNumGuardPages(); | ||
| 54 | } | ||
| 55 | |||
| 56 | KScopedLightLock AcquireDeviceMapLock() { | ||
| 57 | return m_page_table.AcquireDeviceMapLock(); | ||
| 58 | } | ||
| 59 | |||
| 60 | Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) { | ||
| 61 | R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm)); | ||
| 62 | } | ||
| 63 | |||
| 64 | Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, | ||
| 65 | Svc::MemoryPermission perm) { | ||
| 66 | R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm)); | ||
| 67 | } | ||
| 68 | |||
| 69 | Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask, | ||
| 70 | KMemoryAttribute attr) { | ||
| 71 | R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr)); | ||
| 72 | } | ||
| 73 | |||
| 74 | Result SetHeapSize(KProcessAddress* out, size_t size) { | ||
| 75 | R_RETURN(m_page_table.SetHeapSize(out, size)); | ||
| 76 | } | ||
| 77 | |||
| 78 | Result SetMaxHeapSize(size_t size) { | ||
| 79 | R_RETURN(m_page_table.SetMaxHeapSize(size)); | ||
| 80 | } | ||
| 81 | |||
| 82 | Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info, | ||
| 83 | KProcessAddress addr) const { | ||
| 84 | R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr)); | ||
| 85 | } | ||
| 86 | |||
| 87 | Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) { | ||
| 88 | R_RETURN(m_page_table.QueryPhysicalAddress(out, address)); | ||
| 89 | } | ||
| 90 | |||
| 91 | Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) { | ||
| 92 | R_RETURN(m_page_table.QueryStaticMapping(out, address, size)); | ||
| 93 | } | ||
| 94 | |||
| 95 | Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) { | ||
| 96 | R_RETURN(m_page_table.QueryIoMapping(out, address, size)); | ||
| 97 | } | ||
| 98 | |||
| 99 | Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 100 | R_RETURN(m_page_table.MapMemory(dst_address, src_address, size)); | ||
| 101 | } | ||
| 102 | |||
| 103 | Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 104 | R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size)); | ||
| 105 | } | ||
| 106 | |||
| 107 | Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 108 | R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size)); | ||
| 109 | } | ||
| 110 | |||
| 111 | Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 112 | R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size)); | ||
| 113 | } | ||
| 114 | |||
| 115 | Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 116 | R_RETURN(m_page_table.MapIo(phys_addr, size, perm)); | ||
| 117 | } | ||
| 118 | |||
| 119 | Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 120 | Svc::MemoryMapping mapping, Svc::MemoryPermission perm) { | ||
| 121 | R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm)); | ||
| 122 | } | ||
| 123 | |||
| 124 | Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, | ||
| 125 | Svc::MemoryMapping mapping) { | ||
| 126 | R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping)); | ||
| 127 | } | ||
| 128 | |||
| 129 | Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { | ||
| 130 | R_RETURN(m_page_table.MapStatic(phys_addr, size, perm)); | ||
| 131 | } | ||
| 132 | |||
| 133 | Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { | ||
| 134 | R_RETURN(m_page_table.MapRegion(region_type, perm)); | ||
| 135 | } | ||
| 136 | |||
| 137 | Result MapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 138 | R_RETURN(m_page_table.MapInsecureMemory(address, size)); | ||
| 139 | } | ||
| 140 | |||
| 141 | Result UnmapInsecureMemory(KProcessAddress address, size_t size) { | ||
| 142 | R_RETURN(m_page_table.UnmapInsecureMemory(address, size)); | ||
| 143 | } | ||
| 144 | |||
| 145 | Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state, | ||
| 146 | KMemoryPermission perm) { | ||
| 147 | R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm)); | ||
| 148 | } | ||
| 149 | |||
| 150 | Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) { | ||
| 151 | R_RETURN(m_page_table.UnmapPageGroup(address, pg, state)); | ||
| 152 | } | ||
| 153 | |||
| 154 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment, | ||
| 155 | KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { | ||
| 156 | R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm)); | ||
| 157 | } | ||
| 158 | |||
| 159 | Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state, | ||
| 160 | KMemoryPermission perm) { | ||
| 161 | R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm)); | ||
| 162 | } | ||
| 163 | |||
| 164 | Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, | ||
| 165 | KMemoryPermission perm) { | ||
| 166 | R_RETURN(m_page_table.MapPages(address, num_pages, state, perm)); | ||
| 167 | } | ||
| 168 | |||
| 169 | Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) { | ||
| 170 | R_RETURN(m_page_table.UnmapPages(addr, num_pages, state)); | ||
| 171 | } | ||
| 172 | |||
| 173 | Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages, | ||
| 174 | KMemoryState state_mask, KMemoryState state, | ||
| 175 | KMemoryPermission perm_mask, KMemoryPermission perm, | ||
| 176 | KMemoryAttribute attr_mask, KMemoryAttribute attr) { | ||
| 177 | R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, | ||
| 178 | perm_mask, perm, attr_mask, attr)); | ||
| 179 | } | ||
| 180 | |||
| 181 | Result InvalidateProcessDataCache(KProcessAddress address, size_t size) { | ||
| 182 | R_RETURN(m_page_table.InvalidateProcessDataCache(address, size)); | ||
| 183 | } | ||
| 184 | |||
| 185 | Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 186 | R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size)); | ||
| 187 | } | ||
| 188 | |||
| 189 | Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 190 | KMemoryState state) { | ||
| 191 | R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state)); | ||
| 192 | } | ||
| 193 | |||
| 194 | Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { | ||
| 195 | R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size)); | ||
| 196 | } | ||
| 197 | |||
| 198 | Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size, | ||
| 199 | KMemoryState state) { | ||
| 200 | R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state)); | ||
| 201 | } | ||
| 202 | |||
| 203 | Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size, | ||
| 204 | KMemoryPermission perm, bool is_aligned, bool check_heap) { | ||
| 205 | R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm, | ||
| 206 | is_aligned, check_heap)); | ||
| 207 | } | ||
| 208 | |||
| 209 | Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) { | ||
| 210 | R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap)); | ||
| 211 | } | ||
| 212 | |||
| 213 | Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { | ||
| 214 | R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size)); | ||
| 215 | } | ||
| 216 | |||
| 217 | Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) { | ||
| 218 | R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size)); | ||
| 219 | } | ||
| 220 | |||
| 221 | Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 222 | KProcessAddress address, size_t size, | ||
| 223 | KMemoryPermission perm, bool is_aligned) { | ||
| 224 | R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm, | ||
| 225 | is_aligned)); | ||
| 226 | } | ||
| 227 | |||
| 228 | Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out, | ||
| 229 | KProcessAddress address, size_t size) { | ||
| 230 | R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size)); | ||
| 231 | } | ||
| 232 | |||
| 233 | Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) { | ||
| 234 | R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size)); | ||
| 235 | } | ||
| 236 | |||
| 237 | Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { | ||
| 238 | R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size)); | ||
| 239 | } | ||
| 240 | |||
| 241 | Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size, | ||
| 242 | KMemoryPermission perm) { | ||
| 243 | R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm)); | ||
| 244 | } | ||
| 245 | |||
| 246 | Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) { | ||
| 247 | R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg)); | ||
| 248 | } | ||
| 249 | |||
| 250 | Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) { | ||
| 251 | R_RETURN(m_page_table.LockForCodeMemory(out, address, size)); | ||
| 252 | } | ||
| 253 | |||
| 254 | Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) { | ||
| 255 | R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg)); | ||
| 256 | } | ||
| 257 | |||
| 258 | Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out, | ||
| 259 | KProcessAddress address, size_t size) { | ||
| 260 | R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size)); | ||
| 261 | } | ||
| 262 | |||
| 263 | Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, | ||
| 264 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 265 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 266 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 267 | R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, | ||
| 268 | src_state, src_test_perm, src_attr_mask, | ||
| 269 | src_attr)); | ||
| 270 | } | ||
| 271 | |||
| 272 | Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr, | ||
| 273 | KMemoryState src_state_mask, KMemoryState src_state, | ||
| 274 | KMemoryPermission src_test_perm, | ||
| 275 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 276 | R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, | ||
| 277 | src_state, src_test_perm, src_attr_mask, | ||
| 278 | src_attr)); | ||
| 279 | } | ||
| 280 | |||
| 281 | Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, | ||
| 282 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 283 | KMemoryPermission dst_test_perm, | ||
| 284 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 285 | KProcessAddress src_addr) { | ||
| 286 | R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, | ||
| 287 | dst_test_perm, dst_attr_mask, dst_attr, | ||
| 288 | src_addr)); | ||
| 289 | } | ||
| 290 | |||
| 291 | Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, | ||
| 292 | KMemoryState dst_state_mask, KMemoryState dst_state, | ||
| 293 | KMemoryPermission dst_test_perm, | ||
| 294 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 295 | void* src_addr) { | ||
| 296 | R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, | ||
| 297 | dst_state, dst_test_perm, dst_attr_mask, | ||
| 298 | dst_attr, src_addr)); | ||
| 299 | } | ||
| 300 | |||
| 301 | Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr, | ||
| 302 | size_t size, KMemoryState dst_state_mask, | ||
| 303 | KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 304 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, | ||
| 305 | KProcessAddress src_addr, KMemoryState src_state_mask, | ||
| 306 | KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 307 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 308 | R_RETURN(m_page_table.CopyMemoryFromHeapToHeap( | ||
| 309 | dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, | ||
| 310 | dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, | ||
| 311 | src_attr_mask, src_attr)); | ||
| 312 | } | ||
| 313 | |||
| 314 | Result CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 315 | KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size, | ||
| 316 | KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm, | ||
| 317 | KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr, | ||
| 318 | KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm, | ||
| 319 | KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) { | ||
| 320 | R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination( | ||
| 321 | dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, | ||
| 322 | dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, | ||
| 323 | src_attr_mask, src_attr)); | ||
| 324 | } | ||
| 325 | |||
| 326 | Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr, | ||
| 327 | KProcessPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 328 | KMemoryState dst_state, bool send) { | ||
| 329 | R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, | ||
| 330 | test_perm, dst_state, send)); | ||
| 331 | } | ||
| 332 | |||
| 333 | Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) { | ||
| 334 | R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state)); | ||
| 335 | } | ||
| 336 | |||
| 337 | Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) { | ||
| 338 | R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state)); | ||
| 339 | } | ||
| 340 | |||
| 341 | Result MapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 342 | R_RETURN(m_page_table.MapPhysicalMemory(address, size)); | ||
| 343 | } | ||
| 344 | |||
| 345 | Result UnmapPhysicalMemory(KProcessAddress address, size_t size) { | ||
| 346 | R_RETURN(m_page_table.UnmapPhysicalMemory(address, size)); | ||
| 347 | } | ||
| 348 | |||
| 349 | Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 350 | R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size)); | ||
| 351 | } | ||
| 352 | |||
| 353 | Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { | ||
| 354 | R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size)); | ||
| 355 | } | ||
| 356 | |||
| 357 | Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, | ||
| 358 | KProcessPageTable& src_page_table, KProcessAddress src_address) { | ||
| 359 | R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table, | ||
| 360 | src_address)); | ||
| 361 | } | ||
| 362 | |||
| 363 | bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) { | ||
| 364 | return m_page_table.GetPhysicalAddress(out, address); | ||
| 365 | } | ||
| 366 | |||
| 367 | bool Contains(KProcessAddress addr, size_t size) const { | ||
| 368 | return m_page_table.Contains(addr, size); | ||
| 369 | } | ||
| 370 | |||
| 371 | bool IsInAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 372 | return m_page_table.IsInAliasRegion(addr, size); | ||
| 373 | } | ||
| 374 | bool IsInHeapRegion(KProcessAddress addr, size_t size) const { | ||
| 375 | return m_page_table.IsInHeapRegion(addr, size); | ||
| 376 | } | ||
| 377 | bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { | ||
| 378 | return m_page_table.IsInUnsafeAliasRegion(addr, size); | ||
| 379 | } | ||
| 380 | |||
| 381 | bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { | ||
| 382 | return m_page_table.CanContain(addr, size, state); | ||
| 383 | } | ||
| 384 | |||
| 385 | KProcessAddress GetAddressSpaceStart() const { | ||
| 386 | return m_page_table.GetAddressSpaceStart(); | ||
| 387 | } | ||
| 388 | KProcessAddress GetHeapRegionStart() const { | ||
| 389 | return m_page_table.GetHeapRegionStart(); | ||
| 390 | } | ||
| 391 | KProcessAddress GetAliasRegionStart() const { | ||
| 392 | return m_page_table.GetAliasRegionStart(); | ||
| 393 | } | ||
| 394 | KProcessAddress GetStackRegionStart() const { | ||
| 395 | return m_page_table.GetStackRegionStart(); | ||
| 396 | } | ||
| 397 | KProcessAddress GetKernelMapRegionStart() const { | ||
| 398 | return m_page_table.GetKernelMapRegionStart(); | ||
| 399 | } | ||
| 400 | KProcessAddress GetCodeRegionStart() const { | ||
| 401 | return m_page_table.GetCodeRegionStart(); | ||
| 402 | } | ||
| 403 | KProcessAddress GetAliasCodeRegionStart() const { | ||
| 404 | return m_page_table.GetAliasCodeRegionStart(); | ||
| 405 | } | ||
| 406 | |||
| 407 | size_t GetAddressSpaceSize() const { | ||
| 408 | return m_page_table.GetAddressSpaceSize(); | ||
| 409 | } | ||
| 410 | size_t GetHeapRegionSize() const { | ||
| 411 | return m_page_table.GetHeapRegionSize(); | ||
| 412 | } | ||
| 413 | size_t GetAliasRegionSize() const { | ||
| 414 | return m_page_table.GetAliasRegionSize(); | ||
| 415 | } | ||
| 416 | size_t GetStackRegionSize() const { | ||
| 417 | return m_page_table.GetStackRegionSize(); | ||
| 418 | } | ||
| 419 | size_t GetKernelMapRegionSize() const { | ||
| 420 | return m_page_table.GetKernelMapRegionSize(); | ||
| 421 | } | ||
| 422 | size_t GetCodeRegionSize() const { | ||
| 423 | return m_page_table.GetCodeRegionSize(); | ||
| 424 | } | ||
| 425 | size_t GetAliasCodeRegionSize() const { | ||
| 426 | return m_page_table.GetAliasCodeRegionSize(); | ||
| 427 | } | ||
| 428 | |||
| 429 | size_t GetNormalMemorySize() const { | ||
| 430 | return m_page_table.GetNormalMemorySize(); | ||
| 431 | } | ||
| 432 | |||
| 433 | size_t GetCodeSize() const { | ||
| 434 | return m_page_table.GetCodeSize(); | ||
| 435 | } | ||
| 436 | size_t GetCodeDataSize() const { | ||
| 437 | return m_page_table.GetCodeDataSize(); | ||
| 438 | } | ||
| 439 | |||
| 440 | size_t GetAliasCodeSize() const { | ||
| 441 | return m_page_table.GetAliasCodeSize(); | ||
| 442 | } | ||
| 443 | size_t GetAliasCodeDataSize() const { | ||
| 444 | return m_page_table.GetAliasCodeDataSize(); | ||
| 445 | } | ||
| 446 | |||
| 447 | u32 GetAllocateOption() const { | ||
| 448 | return m_page_table.GetAllocateOption(); | ||
| 449 | } | ||
| 450 | |||
| 451 | u32 GetAddressSpaceWidth() const { | ||
| 452 | return m_page_table.GetAddressSpaceWidth(); | ||
| 453 | } | ||
| 454 | |||
| 455 | KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) { | ||
| 456 | return m_page_table.GetHeapPhysicalAddress(address); | ||
| 457 | } | ||
| 458 | |||
| 459 | u8* GetHeapVirtualPointer(KPhysicalAddress address) { | ||
| 460 | return m_page_table.GetHeapVirtualPointer(address); | ||
| 461 | } | ||
| 462 | |||
| 463 | KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) { | ||
| 464 | return m_page_table.GetHeapVirtualAddress(address); | ||
| 465 | } | ||
| 466 | |||
| 467 | KBlockInfoManager* GetBlockInfoManager() { | ||
| 468 | return m_page_table.GetBlockInfoManager(); | ||
| 469 | } | ||
| 470 | |||
| 471 | KPageTable& GetBasePageTable() { | ||
| 472 | return m_page_table; | ||
| 473 | } | ||
| 474 | |||
| 475 | const KPageTable& GetBasePageTable() const { | ||
| 476 | return m_page_table; | ||
| 477 | } | ||
| 478 | }; | ||
| 479 | |||
| 480 | } // namespace Kernel | ||
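Editor's note: one C++ detail worth flagging in this new header is that CopyMemoryFromHeapToHeap, SetupForIpc, and UnmapProcessMemory reach directly into another instance's private m_page_table. That is legal because access control in C++ is per-class, not per-object. A self-contained illustration with hypothetical names:

```cpp
#include <cstdio>

class Wrapper {
private:
    int m_inner;

public:
    explicit Wrapper(int v) : m_inner(v) {}

    // A member function may touch the private members of any Wrapper,
    // not just *this: the same rule that lets KProcessPageTable pass
    // dst_page_table.m_page_table into the underlying KPageTable call.
    int SumWith(const Wrapper& other) const {
        return m_inner + other.m_inner;
    }
};

int main() {
    const Wrapper a{2};
    const Wrapper b{3};
    std::printf("%d\n", a.SumWith(b)); // prints 5
}
```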
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index c64ceb530..3ea653163 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp | |||
| @@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) { | |||
| 383 | if (event != nullptr) { | 383 | if (event != nullptr) { |
| 384 | // // Get the client process/page table. | 384 | // // Get the client process/page table. |
| 385 | // KProcess *client_process = client_thread->GetOwnerProcess(); | 385 | // KProcess *client_process = client_thread->GetOwnerProcess(); |
| 386 | // KPageTable *client_page_table = std::addressof(client_process->PageTable()); | 386 | // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable()); |
| 387 | 387 | ||
| 388 | // // If we need to, reply with an async error. | 388 | // // If we need to, reply with an async error. |
| 389 | // if (R_FAILED(client_result)) { | 389 | // if (R_FAILED(client_result)) { |
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp index 07e92aa80..b51941faf 100644 --- a/src/core/hle/kernel/k_system_resource.cpp +++ b/src/core/hle/kernel/k_system_resource.cpp | |||
| @@ -40,7 +40,7 @@ Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_l | |||
| 40 | 40 | ||
| 41 | // Get resource pointer. | 41 | // Get resource pointer. |
| 42 | KPhysicalAddress resource_paddr = | 42 | KPhysicalAddress resource_paddr = |
| 43 | KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address); | 43 | KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address); |
| 44 | auto* resource = | 44 | auto* resource = |
| 45 | m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr); | 45 | m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr); |
| 46 | 46 | ||
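Editor's note: the updated GetHeapPhysicalAddress resolves through the linear-mapped helpers shown in the k_page_table.h hunk, which assume physical and kernel-virtual addresses differ by a fixed offset. A sketch of that offset scheme follows; the offset constant is an arbitrary stand-in, not the console's actual memory layout:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical linear-mapping offset (illustrative only).
constexpr std::uint64_t LinearOffset = 0x0000'7000'0000'0000ull;

constexpr std::uint64_t ToLinearVirtual(std::uint64_t phys) {
    return phys + LinearOffset;
}
constexpr std::uint64_t ToLinearPhysical(std::uint64_t virt) {
    return virt - LinearOffset;
}

int main() {
    const std::uint64_t phys = 0x8000'0000ull;
    const std::uint64_t virt = ToLinearVirtual(phys);
    // Round-tripping recovers the original physical address.
    std::printf("virt=%#llx phys=%#llx\n",
                static_cast<unsigned long long>(virt),
                static_cast<unsigned long long>(ToLinearPhysical(virt)));
}
```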
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp index 2c45b4232..a632d1634 100644 --- a/src/core/hle/kernel/k_thread_local_page.cpp +++ b/src/core/hle/kernel/k_thread_local_page.cpp | |||
| @@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) { | |||
| 37 | 37 | ||
| 38 | Result KThreadLocalPage::Finalize() { | 38 | Result KThreadLocalPage::Finalize() { |
| 39 | // Get the physical address of the page. | 39 | // Get the physical address of the page. |
| 40 | const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr); | 40 | KPhysicalAddress phys_addr{}; |
| 41 | ASSERT(phys_addr); | 41 | ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr)); |
| 42 | 42 | ||
| 43 | // Unmap the page. | 43 | // Unmap the page. |
| 44 | R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); | 44 | R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); |
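Editor's note: the rewritten Finalize wraps the translating call itself in ASSERT, which is only safe if ASSERT evaluates its argument in every build configuration (as yuzu's apparently does). A defensive variant keeps the side effect outside the assertion; a sketch with simplified stand-in types:

```cpp
#include <cassert>
#include <cstdint>

using PhysicalAddress = std::uint64_t;
using VirtualAddress = std::uint64_t;

// Out-parameter translation: returns false when no mapping exists and
// writes the result through `out` on success (stand-in logic).
bool GetPhysicalAddress(PhysicalAddress* out, VirtualAddress virt) {
    if (virt == 0) {
        return false; // treat null as unmapped in this sketch
    }
    *out = virt + 0x1000;
    return true;
}

int main() {
    PhysicalAddress phys{};
    // Perform the call unconditionally, then assert on the result, so the
    // translation still happens even if assertions are compiled out.
    const bool ok = GetPhysicalAddress(&phys, 0x8000);
    assert(ok);
    return ok && phys != 0 ? 0 : 1;
}
```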
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp deleted file mode 100644 index 773319ad8..000000000 --- a/src/core/hle/kernel/process_capability.cpp +++ /dev/null | |||
| @@ -1,389 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include <bit> | ||
| 5 | |||
| 6 | #include "common/bit_util.h" | ||
| 7 | #include "common/logging/log.h" | ||
| 8 | #include "core/hle/kernel/k_handle_table.h" | ||
| 9 | #include "core/hle/kernel/k_page_table.h" | ||
| 10 | #include "core/hle/kernel/process_capability.h" | ||
| 11 | #include "core/hle/kernel/svc_results.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | namespace { | ||
| 15 | |||
| 16 | // clang-format off | ||
| 17 | |||
| 18 | // Shift offsets for kernel capability types. | ||
| 19 | enum : u32 { | ||
| 20 | CapabilityOffset_PriorityAndCoreNum = 3, | ||
| 21 | CapabilityOffset_Syscall = 4, | ||
| 22 | CapabilityOffset_MapPhysical = 6, | ||
| 23 | CapabilityOffset_MapIO = 7, | ||
| 24 | CapabilityOffset_MapRegion = 10, | ||
| 25 | CapabilityOffset_Interrupt = 11, | ||
| 26 | CapabilityOffset_ProgramType = 13, | ||
| 27 | CapabilityOffset_KernelVersion = 14, | ||
| 28 | CapabilityOffset_HandleTableSize = 15, | ||
| 29 | CapabilityOffset_Debug = 16, | ||
| 30 | }; | ||
| 31 | |||
| 32 | // Combined mask of all parameters that may be initialized only once. | ||
| 33 | constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) | | ||
| 34 | (1U << CapabilityOffset_ProgramType) | | ||
| 35 | (1U << CapabilityOffset_KernelVersion) | | ||
| 36 | (1U << CapabilityOffset_HandleTableSize) | | ||
| 37 | (1U << CapabilityOffset_Debug); | ||
| 38 | |||
| 39 | // Packed kernel version indicating 10.4.0 | ||
| 40 | constexpr u32 PackedKernelVersion = 0x520000; | ||
| 41 | |||
| 42 | // Indicates possible types of capabilities that can be specified. | ||
| 43 | enum class CapabilityType : u32 { | ||
| 44 | Unset = 0U, | ||
| 45 | PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1, | ||
| 46 | Syscall = (1U << CapabilityOffset_Syscall) - 1, | ||
| 47 | MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1, | ||
| 48 | MapIO = (1U << CapabilityOffset_MapIO) - 1, | ||
| 49 | MapRegion = (1U << CapabilityOffset_MapRegion) - 1, | ||
| 50 | Interrupt = (1U << CapabilityOffset_Interrupt) - 1, | ||
| 51 | ProgramType = (1U << CapabilityOffset_ProgramType) - 1, | ||
| 52 | KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1, | ||
| 53 | HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1, | ||
| 54 | Debug = (1U << CapabilityOffset_Debug) - 1, | ||
| 55 | Ignorable = 0xFFFFFFFFU, | ||
| 56 | }; | ||
| 57 | |||
| 58 | // clang-format on | ||
| 59 | |||
| 60 | constexpr CapabilityType GetCapabilityType(u32 value) { | ||
| 61 | return static_cast<CapabilityType>((~value & (value + 1)) - 1); | ||
| 62 | } | ||
| 63 | |||
| 64 | u32 GetFlagBitOffset(CapabilityType type) { | ||
| 65 | const auto value = static_cast<u32>(type); | ||
| 66 | return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value))); | ||
| 67 | } | ||
| 68 | |||
| 69 | } // Anonymous namespace | ||
| 70 | |||
| 71 | Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities, | ||
| 72 | std::size_t num_capabilities, | ||
| 73 | KPageTable& page_table) { | ||
| 74 | Clear(); | ||
| 75 | |||
| 76 | // Allow all cores and priorities. | ||
| 77 | core_mask = 0xF; | ||
| 78 | priority_mask = 0xFFFFFFFFFFFFFFFF; | ||
| 79 | kernel_version = PackedKernelVersion; | ||
| 80 | |||
| 81 | return ParseCapabilities(capabilities, num_capabilities, page_table); | ||
| 82 | } | ||
| 83 | |||
| 84 | Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities, | ||
| 85 | std::size_t num_capabilities, | ||
| 86 | KPageTable& page_table) { | ||
| 87 | Clear(); | ||
| 88 | |||
| 89 | return ParseCapabilities(capabilities, num_capabilities, page_table); | ||
| 90 | } | ||
| 91 | |||
| 92 | void ProcessCapabilities::InitializeForMetadatalessProcess() { | ||
| 93 | // Allow all cores and priorities | ||
| 94 | core_mask = 0xF; | ||
| 95 | priority_mask = 0xFFFFFFFFFFFFFFFF; | ||
| 96 | kernel_version = PackedKernelVersion; | ||
| 97 | |||
| 98 | // Allow all system calls and interrupts. | ||
| 99 | svc_capabilities.set(); | ||
| 100 | interrupt_capabilities.set(); | ||
| 101 | |||
| 102 | // Allow using the maximum possible amount of handles | ||
| 103 | handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize); | ||
| 104 | |||
| 105 | // Allow all debugging capabilities. | ||
| 106 | is_debuggable = true; | ||
| 107 | can_force_debug = true; | ||
| 108 | } | ||
| 109 | |||
| 110 | Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, | ||
| 111 | KPageTable& page_table) { | ||
| 112 | u32 set_flags = 0; | ||
| 113 | u32 set_svc_bits = 0; | ||
| 114 | |||
| 115 | for (std::size_t i = 0; i < num_capabilities; ++i) { | ||
| 116 | const u32 descriptor = capabilities[i]; | ||
| 117 | const auto type = GetCapabilityType(descriptor); | ||
| 118 | |||
| 119 | if (type == CapabilityType::MapPhysical) { | ||
| 120 | i++; | ||
| 121 | |||
| 122 | // The MapPhysical type uses two descriptor flags for its parameters. | ||
| 123 | // If only one descriptor remains, the capability list is malformed. | ||
| 124 | if (i >= num_capabilities) { | ||
| 125 | LOG_ERROR(Kernel, "Invalid combination! i={}", i); | ||
| 126 | return ResultInvalidCombination; | ||
| 127 | } | ||
| 128 | |||
| 129 | const auto size_flags = capabilities[i]; | ||
| 130 | if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) { | ||
| 131 | LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags); | ||
| 132 | return ResultInvalidCombination; | ||
| 133 | } | ||
| 134 | |||
| 135 | const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table); | ||
| 136 | if (result.IsError()) { | ||
| 137 | LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}", | ||
| 138 | descriptor, size_flags); | ||
| 139 | return result; | ||
| 140 | } | ||
| 141 | } else { | ||
| 142 | const auto result = | ||
| 143 | ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table); | ||
| 144 | if (result.IsError()) { | ||
| 145 | LOG_ERROR( | ||
| 146 | Kernel, | ||
| 147 | "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}", | ||
| 148 | set_flags, set_svc_bits, descriptor); | ||
| 149 | return result; | ||
| 150 | } | ||
| 151 | } | ||
| 152 | } | ||
| 153 | |||
| 154 | return ResultSuccess; | ||
| 155 | } | ||
| 156 | |||
| 157 | Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag, | ||
| 158 | KPageTable& page_table) { | ||
| 159 | const auto type = GetCapabilityType(flag); | ||
| 160 | |||
| 161 | if (type == CapabilityType::Unset) { | ||
| 162 | return ResultInvalidArgument; | ||
| 163 | } | ||
| 164 | |||
| 165 | // Bail early on ignorable entries; as one would expect, | ||
| 166 | // ignorable descriptors can be ignored. | ||
| 167 | if (type == CapabilityType::Ignorable) { | ||
| 168 | return ResultSuccess; | ||
| 169 | } | ||
| 170 | |||
| 171 | // Ensure that the given flag hasn't already been initialized. | ||
| 172 | // If it has been, then bail. | ||
| 173 | const u32 flag_length = GetFlagBitOffset(type); | ||
| 174 | const u32 set_flag = 1U << flag_length; | ||
| 175 | if ((set_flag & set_flags & InitializeOnceMask) != 0) { | ||
| 176 | LOG_ERROR(Kernel, | ||
| 177 | "Attempted to initialize flags that may only be initialized once. set_flags={}", | ||
| 178 | set_flags); | ||
| 179 | return ResultInvalidCombination; | ||
| 180 | } | ||
| 181 | set_flags |= set_flag; | ||
| 182 | |||
| 183 | switch (type) { | ||
| 184 | case CapabilityType::PriorityAndCoreNum: | ||
| 185 | return HandlePriorityCoreNumFlags(flag); | ||
| 186 | case CapabilityType::Syscall: | ||
| 187 | return HandleSyscallFlags(set_svc_bits, flag); | ||
| 188 | case CapabilityType::MapIO: | ||
| 189 | return HandleMapIOFlags(flag, page_table); | ||
| 190 | case CapabilityType::MapRegion: | ||
| 191 | return HandleMapRegionFlags(flag, page_table); | ||
| 192 | case CapabilityType::Interrupt: | ||
| 193 | return HandleInterruptFlags(flag); | ||
| 194 | case CapabilityType::ProgramType: | ||
| 195 | return HandleProgramTypeFlags(flag); | ||
| 196 | case CapabilityType::KernelVersion: | ||
| 197 | return HandleKernelVersionFlags(flag); | ||
| 198 | case CapabilityType::HandleTableSize: | ||
| 199 | return HandleHandleTableFlags(flag); | ||
| 200 | case CapabilityType::Debug: | ||
| 201 | return HandleDebugFlags(flag); | ||
| 202 | default: | ||
| 203 | break; | ||
| 204 | } | ||
| 205 | |||
| 206 | LOG_ERROR(Kernel, "Invalid capability type! type={}", type); | ||
| 207 | return ResultInvalidArgument; | ||
| 208 | } | ||
| 209 | |||
| 210 | void ProcessCapabilities::Clear() { | ||
| 211 | svc_capabilities.reset(); | ||
| 212 | interrupt_capabilities.reset(); | ||
| 213 | |||
| 214 | core_mask = 0; | ||
| 215 | priority_mask = 0; | ||
| 216 | |||
| 217 | handle_table_size = 0; | ||
| 218 | kernel_version = 0; | ||
| 219 | |||
| 220 | program_type = ProgramType::SysModule; | ||
| 221 | |||
| 222 | is_debuggable = false; | ||
| 223 | can_force_debug = false; | ||
| 224 | } | ||
| 225 | |||
| 226 | Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) { | ||
| 227 | if (priority_mask != 0 || core_mask != 0) { | ||
| 228 | LOG_ERROR(Kernel, "Core or priority mask is not zero! priority_mask={}, core_mask={}", | ||
| 229 | priority_mask, core_mask); | ||
| 230 | return ResultInvalidArgument; | ||
| 231 | } | ||
| 232 | |||
| 233 | const u32 core_num_min = (flags >> 16) & 0xFF; | ||
| 234 | const u32 core_num_max = (flags >> 24) & 0xFF; | ||
| 235 | if (core_num_min > core_num_max) { | ||
| 236 | LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}", | ||
| 237 | core_num_min, core_num_max); | ||
| 238 | return ResultInvalidCombination; | ||
| 239 | } | ||
| 240 | |||
| 241 | const u32 priority_min = (flags >> 10) & 0x3F; | ||
| 242 | const u32 priority_max = (flags >> 4) & 0x3F; | ||
| 243 | if (priority_min > priority_max) { | ||
| 244 | LOG_ERROR(Kernel, | ||
| 245 | "Priority min is greater than priority max! priority_min={}, priority_max={}", | ||
| 246 | priority_min, priority_max); | ||
| 247 | return ResultInvalidCombination; | ||
| 248 | } | ||
| 249 | |||
| 250 | // The Switch only has 4 usable cores. | ||
| 251 | if (core_num_max >= 4) { | ||
| 252 | LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max); | ||
| 253 | return ResultInvalidCoreId; | ||
| 254 | } | ||
| 255 | |||
| 256 | const auto make_mask = [](u64 min, u64 max) { | ||
| 257 | const u64 range = max - min + 1; | ||
| 258 | const u64 mask = (1ULL << range) - 1; | ||
| 259 | |||
| 260 | return mask << min; | ||
| 261 | }; | ||
| 262 | |||
| 263 | core_mask = make_mask(core_num_min, core_num_max); | ||
| 264 | priority_mask = make_mask(priority_min, priority_max); | ||
| 265 | return ResultSuccess; | ||
| 266 | } | ||
| 267 | |||
| 268 | Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) { | ||
| 269 | const u32 index = flags >> 29; | ||
| 270 | const u32 svc_bit = 1U << index; | ||
| 271 | |||
| 272 | // If we've already set this svc before, bail. | ||
| 273 | if ((set_svc_bits & svc_bit) != 0) { | ||
| 274 | return ResultInvalidCombination; | ||
| 275 | } | ||
| 276 | set_svc_bits |= svc_bit; | ||
| 277 | |||
| 278 | const u32 svc_mask = (flags >> 5) & 0xFFFFFF; | ||
| 279 | for (u32 i = 0; i < 24; ++i) { | ||
| 280 | const u32 svc_number = index * 24 + i; | ||
| 281 | |||
| 282 | if ((svc_mask & (1U << i)) == 0) { | ||
| 283 | continue; | ||
| 284 | } | ||
| 285 | |||
| 286 | svc_capabilities[svc_number] = true; | ||
| 287 | } | ||
| 288 | |||
| 289 | return ResultSuccess; | ||
| 290 | } | ||
| 291 | |||
| 292 | Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags, | ||
| 293 | KPageTable& page_table) { | ||
| 294 | // TODO(Lioncache): Implement once the memory manager can handle this. | ||
| 295 | return ResultSuccess; | ||
| 296 | } | ||
| 297 | |||
| 298 | Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) { | ||
| 299 | // TODO(Lioncache): Implement once the memory manager can handle this. | ||
| 300 | return ResultSuccess; | ||
| 301 | } | ||
| 302 | |||
| 303 | Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) { | ||
| 304 | // TODO(Lioncache): Implement once the memory manager can handle this. | ||
| 305 | return ResultSuccess; | ||
| 306 | } | ||
| 307 | |||
| 308 | Result ProcessCapabilities::HandleInterruptFlags(u32 flags) { | ||
| 309 | constexpr u32 interrupt_ignore_value = 0x3FF; | ||
| 310 | const u32 interrupt0 = (flags >> 12) & 0x3FF; | ||
| 311 | const u32 interrupt1 = (flags >> 22) & 0x3FF; | ||
| 312 | |||
| 313 | for (u32 interrupt : {interrupt0, interrupt1}) { | ||
| 314 | if (interrupt == interrupt_ignore_value) { | ||
| 315 | continue; | ||
| 316 | } | ||
| 317 | |||
| 318 | // NOTE: | ||
| 319 | // This should be checking a generic interrupt controller value | ||
| 320 | // as part of the calculation; however, given we don't currently | ||
| 321 | // emulate that, it's sufficient to mark every interrupt as defined. | ||
| 322 | |||
| 323 | if (interrupt >= interrupt_capabilities.size()) { | ||
| 324 | LOG_ERROR(Kernel, "Process interrupt capability is out of range! interrupt={}", | ||
| 325 | interrupt); | ||
| 326 | return ResultOutOfRange; | ||
| 327 | } | ||
| 328 | |||
| 329 | interrupt_capabilities[interrupt] = true; | ||
| 330 | } | ||
| 331 | |||
| 332 | return ResultSuccess; | ||
| 333 | } | ||
| 334 | |||
| 335 | Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) { | ||
| 336 | const u32 reserved = flags >> 17; | ||
| 337 | if (reserved != 0) { | ||
| 338 | LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); | ||
| 339 | return ResultReservedUsed; | ||
| 340 | } | ||
| 341 | |||
| 342 | program_type = static_cast<ProgramType>((flags >> 14) & 0b111); | ||
| 343 | return ResultSuccess; | ||
| 344 | } | ||
| 345 | |||
| 346 | Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) { | ||
| 347 | // Yes, the internal member variable is checked in the actual kernel here. | ||
| 348 | // This might look odd for options that are only allowed to be initialized | ||
| 349 | // just once; however, the kernel has a separate initialization function for | ||
| 350 | // kernel processes and userland processes. The kernel variant sets this | ||
| 351 | // member variable ahead of time. | ||
| 352 | |||
| 353 | const u32 major_version = kernel_version >> 19; | ||
| 354 | |||
| 355 | if (major_version != 0 || flags < 0x80000) { | ||
| 356 | LOG_ERROR(Kernel, | ||
| 357 | "Kernel version is non zero or flags are too small! major_version={}, flags={}", | ||
| 358 | major_version, flags); | ||
| 359 | return ResultInvalidArgument; | ||
| 360 | } | ||
| 361 | |||
| 362 | kernel_version = flags; | ||
| 363 | return ResultSuccess; | ||
| 364 | } | ||
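The two rejection conditions above pin down the packing: the major version occupies bit 19 and up, and a descriptor whose encoded major version is zero (flags < 0x80000) is invalid. A small sketch of just that arithmetic:

    #include <cstdint>

    constexpr uint32_t GetMajorVersion(uint32_t kernel_version) {
        return kernel_version >> 19;
    }

    static_assert(GetMajorVersion(0x80000) == 1); // smallest value the check accepts
    static_assert(GetMajorVersion(0x7FFFF) == 0); // rejected: flags < 0x80000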
| 365 | |||
| 366 | Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) { | ||
| 367 | const u32 reserved = flags >> 26; | ||
| 368 | if (reserved != 0) { | ||
| 369 | LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); | ||
| 370 | return ResultReservedUsed; | ||
| 371 | } | ||
| 372 | |||
| 373 | handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF); | ||
| 374 | return ResultSuccess; | ||
| 375 | } | ||
| 376 | |||
| 377 | Result ProcessCapabilities::HandleDebugFlags(u32 flags) { | ||
| 378 | const u32 reserved = flags >> 19; | ||
| 379 | if (reserved != 0) { | ||
| 380 | LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); | ||
| 381 | return ResultReservedUsed; | ||
| 382 | } | ||
| 383 | |||
| 384 | is_debuggable = (flags & 0x20000) != 0; | ||
| 385 | can_force_debug = (flags & 0x40000) != 0; | ||
| 386 | return ResultSuccess; | ||
| 387 | } | ||
| 388 | |||
| 389 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h deleted file mode 100644 index ff05dc5ff..000000000 --- a/src/core/hle/kernel/process_capability.h +++ /dev/null | |||
| @@ -1,266 +0,0 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <bitset> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | union Result; | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | class KPageTable; | ||
| 15 | |||
| 16 | /// The possible types of programs that may be indicated | ||
| 17 | /// by the program type capability descriptor. | ||
| 18 | enum class ProgramType { | ||
| 19 | SysModule, | ||
| 20 | Application, | ||
| 21 | Applet, | ||
| 22 | }; | ||
| 23 | |||
| 24 | /// Handles kernel capability descriptors that are provided by | ||
| 25 | /// application metadata. These descriptors provide information | ||
| 26 | /// that alters certain parameters for the kernel process instance | ||
| 27 | /// that will run said application (or applet). | ||
| 28 | /// | ||
| 29 | /// Capabilities are a sequence of flag descriptors that indicate various | ||
| 30 | /// configurations and constraints for a particular process. | ||
| 31 | /// | ||
| 32 | /// Flag types are indicated by a sequence of set low bits. The | ||
| 33 | /// types are distinguished by their low bits as follows (where x indicates "don't care"): | ||
| 34 | /// | ||
| 35 | /// - Priority and core mask : 0bxxxxxxxxxxxx0111 | ||
| 36 | /// - Allowed service call mask: 0bxxxxxxxxxxx01111 | ||
| 37 | /// - Map physical memory : 0bxxxxxxxxx0111111 | ||
| 38 | /// - Map IO memory : 0bxxxxxxxx01111111 | ||
| 39 | /// - Interrupts : 0bxxxx011111111111 | ||
| 40 | /// - Application type : 0bxx01111111111111 | ||
| 41 | /// - Kernel version : 0bx011111111111111 | ||
| 42 | /// - Handle table size : 0b0111111111111111 | ||
| 43 | /// - Debugger flags : 0b1111111111111111 | ||
| 44 | /// | ||
| 45 | /// Each type value is essentially a single set bit's value minus one, forming a mask. | ||
| 46 | /// e.g. the first entry in the above list is simply bit 3 (value 8 -> 0b1000) | ||
| 47 | /// minus one (7 -> 0b0111). | ||
| 48 | /// | ||
| 49 | /// An example of a bit layout (using the map physical layout): | ||
| 50 | /// <example> | ||
| 51 | /// The MapPhysical type indicates a sequence entry pair of: | ||
| 52 | /// | ||
| 53 | /// [initial, memory_flags], where: | ||
| 54 | /// | ||
| 55 | /// initial: | ||
| 56 | /// bits: | ||
| 57 | /// 7-24: Starting page to map memory at. | ||
| 58 | /// 25 : Indicates whether the memory should be mapped as read-only. | ||
| 59 | /// | ||
| 60 | /// memory_flags: | ||
| 61 | /// bits: | ||
| 62 | /// 7-20 : Number of pages to map | ||
| 63 | /// 21-25: Seems to be reserved (but is still validated) | ||
| 64 | /// 26 : Whether the memory being mapped is IO memory or physical memory | ||
| 65 | /// </example> | ||
| 66 | /// | ||
| 67 | class ProcessCapabilities { | ||
| 68 | public: | ||
| 69 | using InterruptCapabilities = std::bitset<1024>; | ||
| 70 | using SyscallCapabilities = std::bitset<192>; | ||
| 71 | |||
| 72 | ProcessCapabilities() = default; | ||
| 73 | ProcessCapabilities(const ProcessCapabilities&) = delete; | ||
| 74 | ProcessCapabilities(ProcessCapabilities&&) = default; | ||
| 75 | |||
| 76 | ProcessCapabilities& operator=(const ProcessCapabilities&) = delete; | ||
| 77 | ProcessCapabilities& operator=(ProcessCapabilities&&) = default; | ||
| 78 | |||
| 79 | /// Initializes this process capabilities instance for a kernel process. | ||
| 80 | /// | ||
| 81 | /// @param capabilities The capabilities to parse | ||
| 82 | /// @param num_capabilities The number of capabilities to parse. | ||
| 83 | /// @param page_table The memory manager to use for handling any mapping-related | ||
| 84 | /// operations (such as mapping IO memory, etc). | ||
| 85 | /// | ||
| 86 | /// @returns ResultSuccess if this capabilities instance was initialized successfully, | ||
| 87 | /// otherwise an error code. | ||
| 88 | /// | ||
| 89 | Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities, | ||
| 90 | KPageTable& page_table); | ||
| 91 | |||
| 92 | /// Initializes this process capabilities instance for a userland process. | ||
| 93 | /// | ||
| 94 | /// @param capabilities The capabilities to parse. | ||
| 95 | /// @param num_capabilities The total number of capabilities to parse. | ||
| 96 | /// @param page_table The memory manager to use for handling any mapping-related | ||
| 97 | /// operations (such as mapping IO memory, etc). | ||
| 98 | /// | ||
| 99 | /// @returns ResultSuccess if this capabilities instance was initialized successfully, | ||
| 100 | /// otherwise an error code. | ||
| 101 | /// | ||
| 102 | Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities, | ||
| 103 | KPageTable& page_table); | ||
| 104 | |||
| 105 | /// Initializes this process capabilities instance for a process that does not | ||
| 106 | /// have any metadata to parse. | ||
| 107 | /// | ||
| 108 | /// This is necessary, as we allow running raw executables, and the internal | ||
| 109 | /// kernel process capabilities also determine what CPU cores the process is | ||
| 110 | /// allowed to run on, and what priorities are allowed for threads. It also | ||
| 111 | /// determines the max handle table size, what the program type is, whether or | ||
| 112 | /// not the process can be debugged, or whether it's possible for a process to | ||
| 113 | /// forcibly debug another process. | ||
| 114 | /// | ||
| 115 | /// Given the above, this essentially enables all capabilities across the board | ||
| 116 | /// for the process. It allows the process to: | ||
| 117 | /// | ||
| 118 | /// - Run on any core | ||
| 119 | /// - Use any thread priority | ||
| 120 | /// - Use the maximum number of handles a process is allowed to. | ||
| 121 | /// - Be debuggable | ||
| 122 | /// - Forcibly debug other processes. | ||
| 123 | /// | ||
| 124 | /// Note that this is not a behavior that the kernel allows a process to do via | ||
| 125 | /// a single function like this. This is yuzu-specific behavior to handle | ||
| 126 | /// executables with no capability descriptors whatsoever to derive behavior from. | ||
| 127 | /// Because it is yuzu-specific, this is also not the default behavior, and is | ||
| 128 | /// not done in the constructor. | ||
| 129 | /// | ||
| 130 | void InitializeForMetadatalessProcess(); | ||
| 131 | |||
| 132 | /// Gets the allowable core mask | ||
| 133 | u64 GetCoreMask() const { | ||
| 134 | return core_mask; | ||
| 135 | } | ||
| 136 | |||
| 137 | /// Gets the allowable priority mask | ||
| 138 | u64 GetPriorityMask() const { | ||
| 139 | return priority_mask; | ||
| 140 | } | ||
| 141 | |||
| 142 | /// Gets the SVC access permission bits | ||
| 143 | const SyscallCapabilities& GetServiceCapabilities() const { | ||
| 144 | return svc_capabilities; | ||
| 145 | } | ||
| 146 | |||
| 147 | /// Gets the valid interrupt bits. | ||
| 148 | const InterruptCapabilities& GetInterruptCapabilities() const { | ||
| 149 | return interrupt_capabilities; | ||
| 150 | } | ||
| 151 | |||
| 152 | /// Gets the program type for this process. | ||
| 153 | ProgramType GetProgramType() const { | ||
| 154 | return program_type; | ||
| 155 | } | ||
| 156 | |||
| 157 | /// Gets the number of total allowable handles for the process' handle table. | ||
| 158 | s32 GetHandleTableSize() const { | ||
| 159 | return handle_table_size; | ||
| 160 | } | ||
| 161 | |||
| 162 | /// Gets the kernel version value. | ||
| 163 | u32 GetKernelVersion() const { | ||
| 164 | return kernel_version; | ||
| 165 | } | ||
| 166 | |||
| 167 | /// Whether or not this process can be debugged. | ||
| 168 | bool IsDebuggable() const { | ||
| 169 | return is_debuggable; | ||
| 170 | } | ||
| 171 | |||
| 172 | /// Whether or not this process can forcibly debug another | ||
| 173 | /// process, even if that process is not considered debuggable. | ||
| 174 | bool CanForceDebug() const { | ||
| 175 | return can_force_debug; | ||
| 176 | } | ||
| 177 | |||
| 178 | private: | ||
| 179 | /// Attempts to parse a given sequence of capability descriptors. | ||
| 180 | /// | ||
| 181 | /// @param capabilities The sequence of capability descriptors to parse. | ||
| 182 | /// @param num_capabilities The number of descriptors within the given sequence. | ||
| 183 | /// @param page_table The memory manager that will perform any memory | ||
| 184 | /// mapping if necessary. | ||
| 185 | /// | ||
| 186 | /// @return ResultSuccess if no errors occur, otherwise an error code. | ||
| 187 | /// | ||
| 188 | Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, | ||
| 189 | KPageTable& page_table); | ||
| 190 | |||
| 191 | /// Attempts to parse a capability descriptor that is only represented by a | ||
| 192 | /// single flag set. | ||
| 193 | /// | ||
| 194 | /// @param set_flags Running set of flags that are used to catch | ||
| 195 | /// flags being initialized more than once when they shouldn't be. | ||
| 196 | /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask. | ||
| 197 | /// @param flag The flag to attempt to parse. | ||
| 198 | /// @param page_table The memory manager that will perform any memory | ||
| 199 | /// mapping if necessary. | ||
| 200 | /// | ||
| 201 | /// @return ResultSuccess if no errors occurred, otherwise an error code. | ||
| 202 | /// | ||
| 203 | Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag, | ||
| 204 | KPageTable& page_table); | ||
| 205 | |||
| 206 | /// Clears the internal state of this process capability instance. This is | ||
| 207 | /// necessary for a sane starting point, since we allow running executables without | ||
| 208 | /// configuration metadata. We assume a process is not going to have metadata, | ||
| 209 | /// and if it turns out that the process does, in fact, have metadata, then | ||
| 210 | /// we attempt to parse it. Thus, we need this to reset data members back to | ||
| 211 | /// a good state. | ||
| 212 | /// | ||
| 213 | /// DO NOT ever make this a public member function. This isn't an invariant | ||
| 214 | /// anything external should depend upon (and if anything comes to rely on it, | ||
| 215 | /// you should immediately be questioning the design of that thing, not this | ||
| 216 | /// class. If the kernel itself can run without depending on behavior like that, | ||
| 217 | /// then so can yuzu). | ||
| 218 | /// | ||
| 219 | void Clear(); | ||
| 220 | |||
| 221 | /// Handles flags related to the priority and core number capability flags. | ||
| 222 | Result HandlePriorityCoreNumFlags(u32 flags); | ||
| 223 | |||
| 224 | /// Handles flags related to determining the allowable SVC mask. | ||
| 225 | Result HandleSyscallFlags(u32& set_svc_bits, u32 flags); | ||
| 226 | |||
| 227 | /// Handles flags related to mapping physical memory pages. | ||
| 228 | Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table); | ||
| 229 | |||
| 230 | /// Handles flags related to mapping IO pages. | ||
| 231 | Result HandleMapIOFlags(u32 flags, KPageTable& page_table); | ||
| 232 | |||
| 233 | /// Handles flags related to mapping physical memory regions. | ||
| 234 | Result HandleMapRegionFlags(u32 flags, KPageTable& page_table); | ||
| 235 | |||
| 236 | /// Handles flags related to the interrupt capability flags. | ||
| 237 | Result HandleInterruptFlags(u32 flags); | ||
| 238 | |||
| 239 | /// Handles flags related to the program type. | ||
| 240 | Result HandleProgramTypeFlags(u32 flags); | ||
| 241 | |||
| 242 | /// Handles flags related to the handle table size. | ||
| 243 | Result HandleHandleTableFlags(u32 flags); | ||
| 244 | |||
| 245 | /// Handles flags related to the kernel version capability flags. | ||
| 246 | Result HandleKernelVersionFlags(u32 flags); | ||
| 247 | |||
| 248 | /// Handles flags related to debug-specific capabilities. | ||
| 249 | Result HandleDebugFlags(u32 flags); | ||
| 250 | |||
| 251 | SyscallCapabilities svc_capabilities; | ||
| 252 | InterruptCapabilities interrupt_capabilities; | ||
| 253 | |||
| 254 | u64 core_mask = 0; | ||
| 255 | u64 priority_mask = 0; | ||
| 256 | |||
| 257 | s32 handle_table_size = 0; | ||
| 258 | u32 kernel_version = 0; | ||
| 259 | |||
| 260 | ProgramType program_type = ProgramType::SysModule; | ||
| 261 | |||
| 262 | bool is_debuggable = false; | ||
| 263 | bool can_force_debug = false; | ||
| 264 | }; | ||
| 265 | |||
| 266 | } // namespace Kernel | ||
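The low-bit type encoding described in the header's doc comment can be checked mechanically: the descriptor type corresponds to the number of trailing set bits. A sketch (C++20, illustrative only):

    #include <bit>
    #include <cstdint>

    constexpr int DescriptorTypeBits(uint32_t flag) {
        return std::countr_one(flag); // number of trailing 1 bits selects the type
    }

    static_assert(DescriptorTypeBits(0b0111) == 3);              // priority and core mask
    static_assert(DescriptorTypeBits(0b01111) == 4);             // allowed service call mask
    static_assert(DescriptorTypeBits(0b0111111111111111) == 15); // handle table size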
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp index 97f1210de..4ca62860d 100644 --- a/src/core/hle/kernel/svc/svc_memory.cpp +++ b/src/core/hle/kernel/svc/svc_memory.cpp | |||
| @@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) { | |||
| 29 | // Helper function that performs the common sanity checks for svcMapMemory | 29 | // Helper function that performs the common sanity checks for svcMapMemory |
| 30 | // and svcUnmapMemory. This is doable, as both functions perform their sanitizing | 30 | // and svcUnmapMemory. This is doable, as both functions perform their sanitizing |
| 31 | // in the same order. | 31 | // in the same order. |
| 32 | Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) { | 32 | Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr, |
| 33 | u64 size) { | ||
| 33 | if (!Common::Is4KBAligned(dst_addr)) { | 34 | if (!Common::Is4KBAligned(dst_addr)) { |
| 34 | LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); | 35 | LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); |
| 35 | R_THROW(ResultInvalidAddress); | 36 | R_THROW(ResultInvalidAddress); |
| @@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask, | |||
| 123 | R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); | 124 | R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory); |
| 124 | 125 | ||
| 125 | // Set the memory attribute. | 126 | // Set the memory attribute. |
| 126 | R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr)); | 127 | R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask), |
| 128 | static_cast<KMemoryAttribute>(attr))); | ||
| 127 | } | 129 | } |
| 128 | 130 | ||
| 129 | /// Maps a memory range into a different range. | 131 | /// Maps a memory range into a different range. |
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp index 99330d02a..793e9f8d0 100644 --- a/src/core/hle/kernel/svc/svc_physical_memory.cpp +++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp | |||
| @@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) { | |||
| 16 | R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); | 16 | R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize); |
| 17 | 17 | ||
| 18 | // Set the heap size. | 18 | // Set the heap size. |
| 19 | R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size)); | 19 | KProcessAddress address{}; |
| 20 | R_TRY(GetCurrentProcess(system.Kernel()) | ||
| 21 | .GetPageTable() | ||
| 22 | .SetHeapSize(std::addressof(address), size)); | ||
| 23 | |||
| 24 | // We succeeded. | ||
| 25 | *out_address = GetInteger(address); | ||
| 26 | R_SUCCEED(); | ||
| 20 | } | 27 | } |
| 21 | 28 | ||
| 22 | /// Maps memory at a desired address | 29 | /// Maps memory at a desired address |
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp index 07cd48175..e1427947b 100644 --- a/src/core/hle/kernel/svc/svc_process_memory.cpp +++ b/src/core/hle/kernel/svc/svc_process_memory.cpp | |||
| @@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d | |||
| 247 | R_THROW(ResultInvalidCurrentMemory); | 247 | R_THROW(ResultInvalidCurrentMemory); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size, | 250 | R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size)); |
| 251 | KPageTable::ICacheInvalidationStrategy::InvalidateAll)); | ||
| 252 | } | 251 | } |
| 253 | 252 | ||
| 254 | Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, | 253 | Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address, |
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp index 51af06e97..816dcb8d0 100644 --- a/src/core/hle/kernel/svc/svc_query_memory.cpp +++ b/src/core/hle/kernel/svc/svc_query_memory.cpp | |||
| @@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn | |||
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | auto& current_memory{GetCurrentMemory(system.Kernel())}; | 33 | auto& current_memory{GetCurrentMemory(system.Kernel())}; |
| 34 | const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()}; | ||
| 35 | 34 | ||
| 36 | current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info)); | 35 | KMemoryInfo mem_info; |
| 36 | R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address)); | ||
| 37 | 37 | ||
| 38 | //! This is supposed to be part of the QueryInfo call. | 38 | const auto svc_mem_info = mem_info.GetSvcMemoryInfo(); |
| 39 | *out_page_info = {}; | 39 | current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info)); |
| 40 | 40 | ||
| 41 | R_SUCCEED(); | 41 | R_SUCCEED(); |
| 42 | } | 42 | } |
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index dd0b27f47..749f51f69 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess; | |||
| 407 | 407 | ||
| 408 | /// Evaluates a boolean expression, and succeeds if that expression is true. | 408 | /// Evaluates a boolean expression, and succeeds if that expression is true. |
| 409 | #define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) | 409 | #define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess) |
| 410 | |||
| 411 | #define R_TRY_CATCH(res_expr) \ | ||
| 412 | { \ | ||
| 413 | const auto R_CURRENT_RESULT = (res_expr); \ | ||
| 414 | if (R_FAILED(R_CURRENT_RESULT)) { \ | ||
| 415 | if (false) | ||
| 416 | |||
| 417 | #define R_END_TRY_CATCH \ | ||
| 418 | else if (R_FAILED(R_CURRENT_RESULT)) { \ | ||
| 419 | R_THROW(R_CURRENT_RESULT); \ | ||
| 420 | } \ | ||
| 421 | } \ | ||
| 422 | } | ||
| 423 | |||
| 424 | #define R_CATCH_ALL() \ | ||
| 425 | } \ | ||
| 426 | else if (R_FAILED(R_CURRENT_RESULT)) { \ | ||
| 427 | if (true) | ||
| 428 | |||
| 429 | #define R_CATCH(res_expr) \ | ||
| 430 | } \ | ||
| 431 | else if ((res_expr) == (R_CURRENT_RESULT)) { \ | ||
| 432 | if (true) | ||
| 433 | |||
| 434 | #define R_CONVERT(catch_type, convert_type) \ | ||
| 435 | R_CATCH(catch_type) { R_THROW(static_cast<Result>(convert_type)); } | ||
| 436 | |||
| 437 | #define R_CONVERT_ALL(convert_type) \ | ||
| 438 | R_CATCH_ALL() { R_THROW(static_cast<Result>(convert_type)); } | ||
| 439 | |||
| 440 | #define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr)) | ||
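A minimal usage sketch of the new try/catch-style result macros; DoTransfer, ResultBusy, ResultTimedOut, ResultInvalidSize, and ResultInvalidArgument are hypothetical names here, and R_THROW/R_SUCCEED come from the earlier part of this header:

    Result TransferExample() {
        R_TRY_CATCH(DoTransfer()) {
            R_CATCH(ResultBusy) { R_THROW(ResultTimedOut); }    // remap one specific error
            R_CONVERT(ResultInvalidSize, ResultInvalidArgument) // shorthand for the same idea
            R_CATCH_ALL() { R_THROW(R_CURRENT_RESULT); }        // propagate everything else
        } R_END_TRY_CATCH;

        R_SUCCEED();
    }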
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp index 1b1c8190e..f21553644 100644 --- a/src/core/hle/service/acc/acc.cpp +++ b/src/core/hle/service/acc/acc.cpp | |||
| @@ -3,11 +3,13 @@ | |||
| 3 | 3 | ||
| 4 | #include <algorithm> | 4 | #include <algorithm> |
| 5 | #include <array> | 5 | #include <array> |
| 6 | |||
| 6 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 7 | #include "common/fs/file.h" | 8 | #include "common/fs/file.h" |
| 8 | #include "common/fs/path_util.h" | 9 | #include "common/fs/path_util.h" |
| 9 | #include "common/logging/log.h" | 10 | #include "common/logging/log.h" |
| 10 | #include "common/polyfill_ranges.h" | 11 | #include "common/polyfill_ranges.h" |
| 12 | #include "common/stb.h" | ||
| 11 | #include "common/string_util.h" | 13 | #include "common/string_util.h" |
| 12 | #include "common/swap.h" | 14 | #include "common/swap.h" |
| 13 | #include "core/constants.h" | 15 | #include "core/constants.h" |
| @@ -38,9 +40,36 @@ static std::filesystem::path GetImagePath(const Common::UUID& uuid) { | |||
| 38 | fmt::format("system/save/8000000000000010/su/avators/{}.jpg", uuid.FormattedString()); | 40 | fmt::format("system/save/8000000000000010/su/avators/{}.jpg", uuid.FormattedString()); |
| 39 | } | 41 | } |
| 40 | 42 | ||
| 41 | static constexpr u32 SanitizeJPEGSize(std::size_t size) { | 43 | static void JPGToMemory(void* context, void* data, int len) { |
| 44 | std::vector<u8>* jpg_image = static_cast<std::vector<u8>*>(context); | ||
| 45 | unsigned char* jpg = static_cast<unsigned char*>(data); | ||
| 46 | jpg_image->insert(jpg_image->end(), jpg, jpg + len); | ||
| 47 | } | ||
| 48 | |||
| 49 | static void SanitizeJPEGImageSize(std::vector<u8>& image) { | ||
| 42 | constexpr std::size_t max_jpeg_image_size = 0x20000; | 50 | constexpr std::size_t max_jpeg_image_size = 0x20000; |
| 43 | return static_cast<u32>(std::min(size, max_jpeg_image_size)); | 51 | constexpr int profile_dimensions = 256; |
| 52 | int original_width, original_height, color_channels; | ||
| 53 | |||
| 54 | const auto plain_image = | ||
| 55 | stbi_load_from_memory(image.data(), static_cast<int>(image.size()), &original_width, | ||
| 56 | &original_height, &color_channels, STBI_rgb); | ||
| 57 | |||
| 58 | // Resize the image to 256x256 | ||
| 59 | if (original_width != profile_dimensions || original_height != profile_dimensions) { | ||
| 60 | // Use vector instead of array to avoid overflowing the stack | ||
| 61 | std::vector<u8> out_image(profile_dimensions * profile_dimensions * STBI_rgb); | ||
| 62 | stbir_resize_uint8_srgb(plain_image, original_width, original_height, 0, out_image.data(), | ||
| 63 | profile_dimensions, profile_dimensions, 0, STBI_rgb, 0, | ||
| 64 | STBIR_FILTER_BOX); | ||
| 65 | image.clear(); | ||
| 66 | if (!stbi_write_jpg_to_func(JPGToMemory, &image, profile_dimensions, profile_dimensions, | ||
| 67 | STBI_rgb, out_image.data(), 0)) { | ||
| 68 | LOG_ERROR(Service_ACC, "Failed to resize the user provided image."); | ||
| 69 | } | ||
| 70 | } | ||
| 71 | |||
| 72 | image.resize(std::min(image.size(), max_jpeg_image_size)); | ||
| 44 | } | 73 | } |
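The JPGToMemory callback above is the standard stb pattern for encoding into memory: stbi_write_jpg_to_func invokes the callback once per encoded chunk, and the context pointer accumulates the bytes. A stripped-down sketch of just that mechanism (assuming stb_image_write is available):

    #include <cstdint>
    #include <vector>

    // Matches stbi_write_func's signature; stb calls this with each encoded chunk.
    static void AppendToVector(void* context, void* data, int len) {
        auto* out = static_cast<std::vector<uint8_t>*>(context);
        const auto* bytes = static_cast<const uint8_t*>(data);
        out->insert(out->end(), bytes, bytes + len);
    }

    // Usage: stbi_write_jpg_to_func(AppendToVector, &jpg_bytes, w, h, 3, rgb.data(), 0);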
| 45 | 74 | ||
| 46 | class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> { | 75 | class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> { |
| @@ -339,19 +368,20 @@ protected: | |||
| 339 | LOG_WARNING(Service_ACC, | 368 | LOG_WARNING(Service_ACC, |
| 340 | "Failed to load user provided image! Falling back to built-in backup..."); | 369 | "Failed to load user provided image! Falling back to built-in backup..."); |
| 341 | ctx.WriteBuffer(Core::Constants::ACCOUNT_BACKUP_JPEG); | 370 | ctx.WriteBuffer(Core::Constants::ACCOUNT_BACKUP_JPEG); |
| 342 | rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size())); | 371 | rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size())); |
| 343 | return; | 372 | return; |
| 344 | } | 373 | } |
| 345 | 374 | ||
| 346 | const u32 size = SanitizeJPEGSize(image.GetSize()); | 375 | std::vector<u8> buffer(image.GetSize()); |
| 347 | std::vector<u8> buffer(size); | ||
| 348 | 376 | ||
| 349 | if (image.Read(buffer) != buffer.size()) { | 377 | if (image.Read(buffer) != buffer.size()) { |
| 350 | LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image."); | 378 | LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image."); |
| 351 | } | 379 | } |
| 352 | 380 | ||
| 381 | SanitizeJPEGImageSize(buffer); | ||
| 382 | |||
| 353 | ctx.WriteBuffer(buffer); | 383 | ctx.WriteBuffer(buffer); |
| 354 | rb.Push<u32>(size); | 384 | rb.Push(static_cast<u32>(buffer.size())); |
| 355 | } | 385 | } |
| 356 | 386 | ||
| 357 | void GetImageSize(HLERequestContext& ctx) { | 387 | void GetImageSize(HLERequestContext& ctx) { |
| @@ -365,10 +395,18 @@ protected: | |||
| 365 | if (!image.IsOpen()) { | 395 | if (!image.IsOpen()) { |
| 366 | LOG_WARNING(Service_ACC, | 396 | LOG_WARNING(Service_ACC, |
| 367 | "Failed to load user provided image! Falling back to built-in backup..."); | 397 | "Failed to load user provided image! Falling back to built-in backup..."); |
| 368 | rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size())); | 398 | rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size())); |
| 369 | } else { | 399 | return; |
| 370 | rb.Push(SanitizeJPEGSize(image.GetSize())); | ||
| 371 | } | 400 | } |
| 401 | |||
| 402 | std::vector<u8> buffer(image.GetSize()); | ||
| 403 | |||
| 404 | if (image.Read(buffer) != buffer.size()) { | ||
| 405 | LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image."); | ||
| 406 | } | ||
| 407 | |||
| 408 | SanitizeJPEGImageSize(buffer); | ||
| 409 | rb.Push(static_cast<u32>(buffer.size())); | ||
| 372 | } | 410 | } |
| 373 | 411 | ||
| 374 | void Store(HLERequestContext& ctx) { | 412 | void Store(HLERequestContext& ctx) { |
diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h index f02bbc450..0bf2598b7 100644 --- a/src/core/hle/service/am/applets/applets.h +++ b/src/core/hle/service/am/applets/applets.h | |||
| @@ -69,6 +69,30 @@ enum class AppletId : u32 { | |||
| 69 | MyPage = 0x1A, | 69 | MyPage = 0x1A, |
| 70 | }; | 70 | }; |
| 71 | 71 | ||
| 72 | enum class AppletProgramId : u64 { | ||
| 73 | QLaunch = 0x0100000000001000ull, | ||
| 74 | Auth = 0x0100000000001001ull, | ||
| 75 | Cabinet = 0x0100000000001002ull, | ||
| 76 | Controller = 0x0100000000001003ull, | ||
| 77 | DataErase = 0x0100000000001004ull, | ||
| 78 | Error = 0x0100000000001005ull, | ||
| 79 | NetConnect = 0x0100000000001006ull, | ||
| 80 | ProfileSelect = 0x0100000000001007ull, | ||
| 81 | SoftwareKeyboard = 0x0100000000001008ull, | ||
| 82 | MiiEdit = 0x0100000000001009ull, | ||
| 83 | Web = 0x010000000000100Aull, | ||
| 84 | Shop = 0x010000000000100Bull, | ||
| 85 | OverlayDisplay = 0x010000000000100Cull, | ||
| 86 | PhotoViewer = 0x010000000000100Dull, | ||
| 87 | Settings = 0x010000000000100Eull, | ||
| 88 | OfflineWeb = 0x010000000000100Full, | ||
| 89 | LoginShare = 0x0100000000001010ull, | ||
| 90 | WebAuth = 0x0100000000001011ull, | ||
| 91 | Starter = 0x0100000000001012ull, | ||
| 92 | MyPage = 0x0100000000001013ull, | ||
| 93 | MaxProgramId = 0x0100000000001FFFull, | ||
| 94 | }; | ||
| 95 | |||
| 72 | enum class LibraryAppletMode : u32 { | 96 | enum class LibraryAppletMode : u32 { |
| 73 | AllForeground = 0, | 97 | AllForeground = 0, |
| 74 | Background = 1, | 98 | Background = 1, |
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp index 221c33b86..d383a266d 100644 --- a/src/core/hle/service/hid/irs.cpp +++ b/src/core/hle/service/hid/irs.cpp | |||
| @@ -138,7 +138,7 @@ void IRS::RunMomentProcessor(HLERequestContext& ctx) { | |||
| 138 | 138 | ||
| 139 | if (result.IsSuccess()) { | 139 | if (result.IsSuccess()) { |
| 140 | auto& device = GetIrCameraSharedMemoryDeviceEntry(parameters.camera_handle); | 140 | auto& device = GetIrCameraSharedMemoryDeviceEntry(parameters.camera_handle); |
| 141 | MakeProcessor<MomentProcessor>(parameters.camera_handle, device); | 141 | MakeProcessorWithCoreContext<MomentProcessor>(parameters.camera_handle, device); |
| 142 | auto& image_transfer_processor = GetProcessor<MomentProcessor>(parameters.camera_handle); | 142 | auto& image_transfer_processor = GetProcessor<MomentProcessor>(parameters.camera_handle); |
| 143 | image_transfer_processor.SetConfig(parameters.processor_config); | 143 | image_transfer_processor.SetConfig(parameters.processor_config); |
| 144 | npad_device->SetPollingMode(Core::HID::EmulatedDeviceIndex::RightIndex, | 144 | npad_device->SetPollingMode(Core::HID::EmulatedDeviceIndex::RightIndex, |
diff --git a/src/core/hle/service/hid/irsensor/clustering_processor.cpp b/src/core/hle/service/hid/irsensor/clustering_processor.cpp index e2f4ae876..c559eb0d5 100644 --- a/src/core/hle/service/hid/irsensor/clustering_processor.cpp +++ b/src/core/hle/service/hid/irsensor/clustering_processor.cpp | |||
| @@ -3,16 +3,18 @@ | |||
| 3 | 3 | ||
| 4 | #include <queue> | 4 | #include <queue> |
| 5 | 5 | ||
| 6 | #include "core/core.h" | ||
| 7 | #include "core/core_timing.h" | ||
| 6 | #include "core/hid/emulated_controller.h" | 8 | #include "core/hid/emulated_controller.h" |
| 7 | #include "core/hid/hid_core.h" | 9 | #include "core/hid/hid_core.h" |
| 8 | #include "core/hle/service/hid/irsensor/clustering_processor.h" | 10 | #include "core/hle/service/hid/irsensor/clustering_processor.h" |
| 9 | 11 | ||
| 10 | namespace Service::IRS { | 12 | namespace Service::IRS { |
| 11 | ClusteringProcessor::ClusteringProcessor(Core::HID::HIDCore& hid_core_, | 13 | ClusteringProcessor::ClusteringProcessor(Core::System& system_, |
| 12 | Core::IrSensor::DeviceFormat& device_format, | 14 | Core::IrSensor::DeviceFormat& device_format, |
| 13 | std::size_t npad_index) | 15 | std::size_t npad_index) |
| 14 | : device{device_format} { | 16 | : device{device_format}, system{system_} { |
| 15 | npad_device = hid_core_.GetEmulatedControllerByIndex(npad_index); | 17 | npad_device = system.HIDCore().GetEmulatedControllerByIndex(npad_index); |
| 16 | 18 | ||
| 17 | device.mode = Core::IrSensor::IrSensorMode::ClusteringProcessor; | 19 | device.mode = Core::IrSensor::IrSensorMode::ClusteringProcessor; |
| 18 | device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected; | 20 | device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected; |
| @@ -48,7 +50,7 @@ void ClusteringProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType ty | |||
| 48 | } | 50 | } |
| 49 | 51 | ||
| 50 | next_state = {}; | 52 | next_state = {}; |
| 51 | const auto camera_data = npad_device->GetCamera(); | 53 | const auto& camera_data = npad_device->GetCamera(); |
| 52 | auto filtered_image = camera_data.data; | 54 | auto filtered_image = camera_data.data; |
| 53 | 55 | ||
| 54 | RemoveLowIntensityData(filtered_image); | 56 | RemoveLowIntensityData(filtered_image); |
| @@ -83,7 +85,7 @@ void ClusteringProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType ty | |||
| 83 | } | 85 | } |
| 84 | 86 | ||
| 85 | next_state.sampling_number = camera_data.sample; | 87 | next_state.sampling_number = camera_data.sample; |
| 86 | next_state.timestamp = next_state.timestamp + 131; | 88 | next_state.timestamp = system.CoreTiming().GetGlobalTimeNs().count(); |
| 87 | next_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low; | 89 | next_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low; |
| 88 | shared_memory->clustering_lifo.WriteNextEntry(next_state); | 90 | shared_memory->clustering_lifo.WriteNextEntry(next_state); |
| 89 | 91 | ||
| @@ -202,14 +204,14 @@ ClusteringProcessor::ClusteringData ClusteringProcessor::MergeCluster( | |||
| 202 | } | 204 | } |
| 203 | 205 | ||
| 204 | u8 ClusteringProcessor::GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const { | 206 | u8 ClusteringProcessor::GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const { |
| 205 | if ((y * width) + x > data.size()) { | 207 | if ((y * width) + x >= data.size()) { |
| 206 | return 0; | 208 | return 0; |
| 207 | } | 209 | } |
| 208 | return data[(y * width) + x]; | 210 | return data[(y * width) + x]; |
| 209 | } | 211 | } |
| 210 | 212 | ||
| 211 | void ClusteringProcessor::SetPixel(std::vector<u8>& data, std::size_t x, std::size_t y, u8 value) { | 213 | void ClusteringProcessor::SetPixel(std::vector<u8>& data, std::size_t x, std::size_t y, u8 value) { |
| 212 | if ((y * width) + x > data.size()) { | 214 | if ((y * width) + x >= data.size()) { |
| 213 | return; | 215 | return; |
| 214 | } | 216 | } |
| 215 | data[(y * width) + x] = value; | 217 | data[(y * width) + x] = value; |
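The `>` to `>=` change above fixes an off-by-one: for a buffer of size N, index N passed the old check and accessed one element past the end. Condensed into a standalone sketch:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    uint8_t GetPixelChecked(const std::vector<uint8_t>& data, std::size_t index) {
        // Old check: (index > data.size()) let index == data.size() through,
        // and data[index] then read out of bounds. '>=' rejects that case.
        if (index >= data.size()) {
            return 0;
        }
        return data[index];
    }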
diff --git a/src/core/hle/service/hid/irsensor/clustering_processor.h b/src/core/hle/service/hid/irsensor/clustering_processor.h index dc01a8ea7..83f34734a 100644 --- a/src/core/hle/service/hid/irsensor/clustering_processor.h +++ b/src/core/hle/service/hid/irsensor/clustering_processor.h | |||
| @@ -8,6 +8,10 @@ | |||
| 8 | #include "core/hle/service/hid/irs_ring_lifo.h" | 8 | #include "core/hle/service/hid/irs_ring_lifo.h" |
| 9 | #include "core/hle/service/hid/irsensor/processor_base.h" | 9 | #include "core/hle/service/hid/irsensor/processor_base.h" |
| 10 | 10 | ||
| 11 | namespace Core { | ||
| 12 | class System; | ||
| 13 | } | ||
| 14 | |||
| 11 | namespace Core::HID { | 15 | namespace Core::HID { |
| 12 | class EmulatedController; | 16 | class EmulatedController; |
| 13 | } // namespace Core::HID | 17 | } // namespace Core::HID |
| @@ -15,8 +19,7 @@ class EmulatedController; | |||
| 15 | namespace Service::IRS { | 19 | namespace Service::IRS { |
| 16 | class ClusteringProcessor final : public ProcessorBase { | 20 | class ClusteringProcessor final : public ProcessorBase { |
| 17 | public: | 21 | public: |
| 18 | explicit ClusteringProcessor(Core::HID::HIDCore& hid_core_, | 22 | explicit ClusteringProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format, |
| 19 | Core::IrSensor::DeviceFormat& device_format, | ||
| 20 | std::size_t npad_index); | 23 | std::size_t npad_index); |
| 21 | ~ClusteringProcessor() override; | 24 | ~ClusteringProcessor() override; |
| 22 | 25 | ||
| @@ -106,5 +109,7 @@ private: | |||
| 106 | Core::IrSensor::DeviceFormat& device; | 109 | Core::IrSensor::DeviceFormat& device; |
| 107 | Core::HID::EmulatedController* npad_device; | 110 | Core::HID::EmulatedController* npad_device; |
| 108 | int callback_key{}; | 111 | int callback_key{}; |
| 112 | |||
| 113 | Core::System& system; | ||
| 109 | }; | 114 | }; |
| 110 | } // namespace Service::IRS | 115 | } // namespace Service::IRS |
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp index 803a6277c..22067a591 100644 --- a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp +++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp | |||
| @@ -49,7 +49,7 @@ void ImageTransferProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType | |||
| 49 | return; | 49 | return; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | const auto camera_data = npad_device->GetCamera(); | 52 | const auto& camera_data = npad_device->GetCamera(); |
| 53 | 53 | ||
| 54 | // This indicates how much ambient light is present | 54 | // This indicates how much ambient light is present |
| 55 | processor_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low; | 55 | processor_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low; |
diff --git a/src/core/hle/service/hid/irsensor/moment_processor.cpp b/src/core/hle/service/hid/irsensor/moment_processor.cpp index dbaca420a..cf045bda7 100644 --- a/src/core/hle/service/hid/irsensor/moment_processor.cpp +++ b/src/core/hle/service/hid/irsensor/moment_processor.cpp | |||
| @@ -1,24 +1,137 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | 2 | // SPDX-License-Identifier: GPL-3.0-or-later |
| 3 | 3 | ||
| 4 | #include "core/core.h" | ||
| 5 | #include "core/core_timing.h" | ||
| 6 | #include "core/hid/emulated_controller.h" | ||
| 7 | #include "core/hid/hid_core.h" | ||
| 4 | #include "core/hle/service/hid/irsensor/moment_processor.h" | 8 | #include "core/hle/service/hid/irsensor/moment_processor.h" |
| 5 | 9 | ||
| 6 | namespace Service::IRS { | 10 | namespace Service::IRS { |
| 7 | MomentProcessor::MomentProcessor(Core::IrSensor::DeviceFormat& device_format) | 11 | static constexpr auto format = Core::IrSensor::ImageTransferProcessorFormat::Size40x30; |
| 8 | : device(device_format) { | 12 | static constexpr std::size_t ImageWidth = 40; |
| 13 | static constexpr std::size_t ImageHeight = 30; | ||
| 14 | |||
| 15 | MomentProcessor::MomentProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format, | ||
| 16 | std::size_t npad_index) | ||
| 17 | : device(device_format), system{system_} { | ||
| 18 | npad_device = system.HIDCore().GetEmulatedControllerByIndex(npad_index); | ||
| 19 | |||
| 9 | device.mode = Core::IrSensor::IrSensorMode::MomentProcessor; | 20 | device.mode = Core::IrSensor::IrSensorMode::MomentProcessor; |
| 10 | device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected; | 21 | device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected; |
| 11 | device.camera_internal_status = Core::IrSensor::IrCameraInternalStatus::Stopped; | 22 | device.camera_internal_status = Core::IrSensor::IrCameraInternalStatus::Stopped; |
| 23 | |||
| 24 | shared_memory = std::construct_at( | ||
| 25 | reinterpret_cast<MomentSharedMemory*>(&device_format.state.processor_raw_data)); | ||
| 26 | |||
| 27 | Core::HID::ControllerUpdateCallback engine_callback{ | ||
| 28 | .on_change = [this](Core::HID::ControllerTriggerType type) { OnControllerUpdate(type); }, | ||
| 29 | .is_npad_service = true, | ||
| 30 | }; | ||
| 31 | callback_key = npad_device->SetCallback(engine_callback); | ||
| 12 | } | 32 | } |
| 13 | 33 | ||
| 14 | MomentProcessor::~MomentProcessor() = default; | 34 | MomentProcessor::~MomentProcessor() { |
| 35 | npad_device->DeleteCallback(callback_key); | ||
| 36 | } | ||
| 15 | 37 | ||
| 16 | void MomentProcessor::StartProcessor() {} | 38 | void MomentProcessor::StartProcessor() { |
| 39 | device.camera_status = Core::IrSensor::IrCameraStatus::Available; | ||
| 40 | device.camera_internal_status = Core::IrSensor::IrCameraInternalStatus::Ready; | ||
| 41 | } | ||
| 17 | 42 | ||
| 18 | void MomentProcessor::SuspendProcessor() {} | 43 | void MomentProcessor::SuspendProcessor() {} |
| 19 | 44 | ||
| 20 | void MomentProcessor::StopProcessor() {} | 45 | void MomentProcessor::StopProcessor() {} |
| 21 | 46 | ||
| 47 | void MomentProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType type) { | ||
| 48 | if (type != Core::HID::ControllerTriggerType::IrSensor) { | ||
| 49 | return; | ||
| 50 | } | ||
| 51 | |||
| 52 | next_state = {}; | ||
| 53 | const auto& camera_data = npad_device->GetCamera(); | ||
| 54 | |||
| 55 | const auto window_width = static_cast<std::size_t>(current_config.window_of_interest.width); | ||
| 56 | const auto window_height = static_cast<std::size_t>(current_config.window_of_interest.height); | ||
| 57 | const auto window_start_x = static_cast<std::size_t>(current_config.window_of_interest.x); | ||
| 58 | const auto window_start_y = static_cast<std::size_t>(current_config.window_of_interest.y); | ||
| 59 | |||
| 60 | const std::size_t block_width = window_width / Columns; | ||
| 61 | const std::size_t block_height = window_height / Rows; | ||
| 62 | |||
| 63 | for (std::size_t row = 0; row < Rows; row++) { | ||
| 64 | for (std::size_t column = 0; column < Columns; column++) { | ||
| 65 | const size_t x_pos = (column * block_width) + window_start_x; | ||
| 66 | const size_t y_pos = (row * block_height) + window_start_y; | ||
| 67 | auto& statistic = next_state.statistic[column + (row * Columns)]; | ||
| 68 | statistic = GetStatistic(camera_data.data, x_pos, y_pos, block_width, block_height); | ||
| 69 | } | ||
| 70 | } | ||
| 71 | |||
| 72 | next_state.sampling_number = camera_data.sample; | ||
| 73 | next_state.timestamp = system.CoreTiming().GetGlobalTimeNs().count(); | ||
| 74 | next_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low; | ||
| 75 | shared_memory->moment_lifo.WriteNextEntry(next_state); | ||
| 76 | |||
| 77 | if (!IsProcessorActive()) { | ||
| 78 | StartProcessor(); | ||
| 79 | } | ||
| 80 | } | ||
| 81 | |||
| 82 | u8 MomentProcessor::GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const { | ||
| 83 | if ((y * ImageWidth) + x >= data.size()) { | ||
| 84 | return 0; | ||
| 85 | } | ||
| 86 | return data[(y * ImageWidth) + x]; | ||
| 87 | } | ||
| 88 | |||
| 89 | MomentProcessor::MomentStatistic MomentProcessor::GetStatistic(const std::vector<u8>& data, | ||
| 90 | std::size_t start_x, | ||
| 91 | std::size_t start_y, | ||
| 92 | std::size_t width, | ||
| 93 | std::size_t height) const { | ||
| 94 | // The actual implementation is always 320x240 | ||
| 95 | static constexpr std::size_t RealWidth = 320; | ||
| 96 | static constexpr std::size_t RealHeight = 240; | ||
| 97 | static constexpr std::size_t Threshold = 30; | ||
| 98 | MomentStatistic statistic{}; | ||
| 99 | std::size_t active_points{}; | ||
| 100 | |||
| 101 | // Sum all data points in the block that meet the threshold | ||
| 102 | for (std::size_t y = 0; y < height; y++) { | ||
| 103 | for (std::size_t x = 0; x < width; x++) { | ||
| 104 | const size_t x_pos = x + start_x; | ||
| 105 | const size_t y_pos = y + start_y; | ||
| 106 | const auto pixel = | ||
| 107 | GetPixel(data, x_pos * ImageWidth / RealWidth, y_pos * ImageHeight / RealHeight); | ||
| 108 | |||
| 109 | if (pixel < Threshold) { | ||
| 110 | continue; | ||
| 111 | } | ||
| 112 | |||
| 113 | statistic.average_intensity += pixel; | ||
| 114 | |||
| 115 | statistic.centroid.x += static_cast<float>(x_pos); | ||
| 116 | statistic.centroid.y += static_cast<float>(y_pos); | ||
| 117 | |||
| 118 | active_points++; | ||
| 119 | } | ||
| 120 | } | ||
| 121 | |||
| 122 | // Return an empty field if no points were available | ||
| 123 | if (active_points == 0) { | ||
| 124 | return {}; | ||
| 125 | } | ||
| 126 | |||
| 127 | // Finally calculate the actual centroid and average intensity | ||
| 128 | statistic.centroid.x /= static_cast<float>(active_points); | ||
| 129 | statistic.centroid.y /= static_cast<float>(active_points); | ||
| 130 | statistic.average_intensity /= static_cast<f32>(width * height); | ||
| 131 | |||
| 132 | return statistic; | ||
| 133 | } | ||
| 134 | |||
| 22 | void MomentProcessor::SetConfig(Core::IrSensor::PackedMomentProcessorConfig config) { | 135 | void MomentProcessor::SetConfig(Core::IrSensor::PackedMomentProcessorConfig config) { |
| 23 | current_config.camera_config.exposure_time = config.camera_config.exposure_time; | 136 | current_config.camera_config.exposure_time = config.camera_config.exposure_time; |
| 24 | current_config.camera_config.gain = config.camera_config.gain; | 137 | current_config.camera_config.gain = config.camera_config.gain; |
| @@ -29,6 +142,8 @@ void MomentProcessor::SetConfig(Core::IrSensor::PackedMomentProcessorConfig conf | |||
| 29 | current_config.preprocess = | 142 | current_config.preprocess = |
| 30 | static_cast<Core::IrSensor::MomentProcessorPreprocess>(config.preprocess); | 143 | static_cast<Core::IrSensor::MomentProcessorPreprocess>(config.preprocess); |
| 31 | current_config.preprocess_intensity_threshold = config.preprocess_intensity_threshold; | 144 | current_config.preprocess_intensity_threshold = config.preprocess_intensity_threshold; |
| 145 | |||
| 146 | npad_device->SetCameraFormat(format); | ||
| 32 | } | 147 | } |
| 33 | 148 | ||
| 34 | } // namespace Service::IRS | 149 | } // namespace Service::IRS |
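For orientation, the grid arithmetic the moment processor uses: Columns = 8 and Rows = 6 split the default 320x240 window of interest into 40x40 blocks, and each sensor-space coordinate is scaled into the stored 40x30 image before sampling. A sketch under those assumed defaults:

    #include <cstddef>

    constexpr std::size_t Columns = 8, Rows = 6;
    constexpr std::size_t RealWidth = 320, RealHeight = 240; // sensor space
    constexpr std::size_t ImageWidth = 40, ImageHeight = 30; // stored image

    static_assert(RealWidth / Columns == 40); // block width
    static_assert(RealHeight / Rows == 40);   // block height

    // A sensor-space point (200, 120) maps to image pixel (25, 15):
    static_assert(200 * ImageWidth / RealWidth == 25);
    static_assert(120 * ImageHeight / RealHeight == 15);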
diff --git a/src/core/hle/service/hid/irsensor/moment_processor.h b/src/core/hle/service/hid/irsensor/moment_processor.h index d4bd22e0f..398cfbdc1 100644 --- a/src/core/hle/service/hid/irsensor/moment_processor.h +++ b/src/core/hle/service/hid/irsensor/moment_processor.h | |||
| @@ -6,12 +6,22 @@ | |||
| 6 | #include "common/bit_field.h" | 6 | #include "common/bit_field.h" |
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | #include "core/hid/irs_types.h" | 8 | #include "core/hid/irs_types.h" |
| 9 | #include "core/hle/service/hid/irs_ring_lifo.h" | ||
| 9 | #include "core/hle/service/hid/irsensor/processor_base.h" | 10 | #include "core/hle/service/hid/irsensor/processor_base.h" |
| 10 | 11 | ||
| 12 | namespace Core { | ||
| 13 | class System; | ||
| 14 | } | ||
| 15 | |||
| 16 | namespace Core::HID { | ||
| 17 | class EmulatedController; | ||
| 18 | } // namespace Core::HID | ||
| 19 | |||
| 11 | namespace Service::IRS { | 20 | namespace Service::IRS { |
| 12 | class MomentProcessor final : public ProcessorBase { | 21 | class MomentProcessor final : public ProcessorBase { |
| 13 | public: | 22 | public: |
| 14 | explicit MomentProcessor(Core::IrSensor::DeviceFormat& device_format); | 23 | explicit MomentProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format, |
| 24 | std::size_t npad_index); | ||
| 15 | ~MomentProcessor() override; | 25 | ~MomentProcessor() override; |
| 16 | 26 | ||
| 17 | // Called when the processor is initialized | 27 | // Called when the processor is initialized |
| @@ -27,6 +37,9 @@ public: | |||
| 27 | void SetConfig(Core::IrSensor::PackedMomentProcessorConfig config); | 37 | void SetConfig(Core::IrSensor::PackedMomentProcessorConfig config); |
| 28 | 38 | ||
| 29 | private: | 39 | private: |
| 40 | static constexpr std::size_t Columns = 8; | ||
| 41 | static constexpr std::size_t Rows = 6; | ||
| 42 | |||
| 30 | // This is nn::irsensor::MomentProcessorConfig | 43 | // This is nn::irsensor::MomentProcessorConfig |
| 31 | struct MomentProcessorConfig { | 44 | struct MomentProcessorConfig { |
| 32 | Core::IrSensor::CameraConfig camera_config; | 45 | Core::IrSensor::CameraConfig camera_config; |
| @@ -50,12 +63,29 @@ private: | |||
| 50 | u64 timestamp; | 63 | u64 timestamp; |
| 51 | Core::IrSensor::CameraAmbientNoiseLevel ambient_noise_level; | 64 | Core::IrSensor::CameraAmbientNoiseLevel ambient_noise_level; |
| 52 | INSERT_PADDING_BYTES(4); | 65 | INSERT_PADDING_BYTES(4); |
| 53 | std::array<MomentStatistic, 0x30> stadistic; | 66 | std::array<MomentStatistic, Columns * Rows> statistic; |
| 54 | }; | 67 | }; |
| 55 | static_assert(sizeof(MomentProcessorState) == 0x258, "MomentProcessorState is an invalid size"); | 68 | static_assert(sizeof(MomentProcessorState) == 0x258, "MomentProcessorState is an invalid size"); |
| 56 | 69 | ||
| 70 | struct MomentSharedMemory { | ||
| 71 | Service::IRS::Lifo<MomentProcessorState, 6> moment_lifo; | ||
| 72 | }; | ||
| 73 | static_assert(sizeof(MomentSharedMemory) == 0xE20, "MomentSharedMemory is an invalid size"); | ||
| 74 | |||
| 75 | void OnControllerUpdate(Core::HID::ControllerTriggerType type); | ||
| 76 | u8 GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const; | ||
| 77 | MomentStatistic GetStatistic(const std::vector<u8>& data, std::size_t start_x, | ||
| 78 | std::size_t start_y, std::size_t width, std::size_t height) const; | ||
| 79 | |||
| 80 | MomentSharedMemory* shared_memory = nullptr; | ||
| 81 | MomentProcessorState next_state{}; | ||
| 82 | |||
| 57 | MomentProcessorConfig current_config{}; | 83 | MomentProcessorConfig current_config{}; |
| 58 | Core::IrSensor::DeviceFormat& device; | 84 | Core::IrSensor::DeviceFormat& device; |
| 85 | Core::HID::EmulatedController* npad_device; | ||
| 86 | int callback_key{}; | ||
| 87 | |||
| 88 | Core::System& system; | ||
| 59 | }; | 89 | }; |
| 60 | 90 | ||
| 61 | } // namespace Service::IRS | 91 | } // namespace Service::IRS |
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index c73035c77..97b6a9385 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp | |||
| @@ -286,9 +286,14 @@ public: | |||
| 286 | rb.Push(ResultSuccess); | 286 | rb.Push(ResultSuccess); |
| 287 | } | 287 | } |
| 288 | 288 | ||
| 289 | bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const { | 289 | bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start, |
| 290 | std::size_t size) const { | ||
| 290 | const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; | 291 | const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; |
| 291 | const auto start_info{page_table.QueryInfo(start - 1)}; | 292 | |
| 293 | Kernel::KMemoryInfo start_info; | ||
| 294 | Kernel::Svc::PageInfo page_info; | ||
| 295 | R_ASSERT( | ||
| 296 | page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1)); | ||
| 292 | 297 | ||
| 293 | if (start_info.GetState() != Kernel::KMemoryState::Free) { | 298 | if (start_info.GetState() != Kernel::KMemoryState::Free) { |
| 294 | return {}; | 299 | return {}; |
| @@ -298,7 +303,9 @@ public: | |||
| 298 | return {}; | 303 | return {}; |
| 299 | } | 304 | } |
| 300 | 305 | ||
| 301 | const auto end_info{page_table.QueryInfo(start + size)}; | 306 | Kernel::KMemoryInfo end_info; |
| 307 | R_ASSERT(page_table.QueryInfo(std::addressof(end_info), std::addressof(page_info), | ||
| 308 | start + size)); | ||
| 302 | 309 | ||
| 303 | if (end_info.GetState() != Kernel::KMemoryState::Free) { | 310 | if (end_info.GetState() != Kernel::KMemoryState::Free) { |
| 304 | return {}; | 311 | return {}; |
| @@ -307,7 +314,7 @@ public: | |||
| 307 | return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); | 314 | return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); |
| 308 | } | 315 | } |
| 309 | 316 | ||
| 310 | Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) { | 317 | Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) { |
| 311 | size = Common::AlignUp(size, Kernel::PageSize); | 318 | size = Common::AlignUp(size, Kernel::PageSize); |
| 312 | size += page_table.GetNumGuardPages() * Kernel::PageSize * 4; | 319 | size += page_table.GetNumGuardPages() * Kernel::PageSize * 4; |
| 313 | 320 | ||
| @@ -391,12 +398,8 @@ public: | |||
| 391 | 398 | ||
| 392 | if (bss_size) { | 399 | if (bss_size) { |
| 393 | auto block_guard = detail::ScopeExit([&] { | 400 | auto block_guard = detail::ScopeExit([&] { |
| 394 | page_table.UnmapCodeMemory( | 401 | page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size); |
| 395 | addr + nro_size, bss_addr, bss_size, | 402 | page_table.UnmapCodeMemory(addr, nro_addr, nro_size); |
| 396 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange); | ||
| 397 | page_table.UnmapCodeMemory( | ||
| 398 | addr, nro_addr, nro_size, | ||
| 399 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange); | ||
| 400 | }); | 403 | }); |
| 401 | 404 | ||
| 402 | const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)}; | 405 | const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)}; |
| @@ -578,21 +581,17 @@ public: | |||
| 578 | auto& page_table{system.ApplicationProcess()->GetPageTable()}; | 581 | auto& page_table{system.ApplicationProcess()->GetPageTable()}; |
| 579 | 582 | ||
| 580 | if (info.bss_size != 0) { | 583 | if (info.bss_size != 0) { |
| 581 | R_TRY(page_table.UnmapCodeMemory( | 584 | R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size + |
| 582 | info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address, | 585 | info.data_size, |
| 583 | info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | 586 | info.bss_address, info.bss_size)); |
| 584 | } | 587 | } |
| 585 | 588 | ||
| 586 | R_TRY(page_table.UnmapCodeMemory( | 589 | R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size, |
| 587 | info.nro_address + info.text_size + info.ro_size, | 590 | info.src_addr + info.text_size + info.ro_size, |
| 588 | info.src_addr + info.text_size + info.ro_size, info.data_size, | 591 | info.data_size)); |
| 589 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | 592 | R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size, |
| 590 | R_TRY(page_table.UnmapCodeMemory( | 593 | info.src_addr + info.text_size, info.ro_size)); |
| 591 | info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size, | 594 | R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size)); |
| 592 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | ||
| 593 | R_TRY(page_table.UnmapCodeMemory( | ||
| 594 | info.nro_address, info.src_addr, info.text_size, | ||
| 595 | Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange)); | ||
| 596 | return ResultSuccess; | 595 | return ResultSuccess; |
| 597 | } | 596 | } |
| 598 | 597 | ||
diff --git a/src/core/hle/service/nvdrv/devices/ioctl_serialization.h b/src/core/hle/service/nvdrv/devices/ioctl_serialization.h new file mode 100644 index 000000000..b12bcd138 --- /dev/null +++ b/src/core/hle/service/nvdrv/devices/ioctl_serialization.h | |||
| @@ -0,0 +1,159 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <span> | ||
| 7 | #include <vector> | ||
| 8 | |||
| 9 | #include "common/concepts.h" | ||
| 10 | #include "core/hle/service/nvdrv/devices/nvdevice.h" | ||
| 11 | |||
| 12 | namespace Service::Nvidia::Devices { | ||
| 13 | |||
| 14 | struct IoctlOneArgTraits { | ||
| 15 | template <typename T, typename R, typename A, typename... B> | ||
| 16 | static A GetFirstArgImpl(R (T::*)(A, B...)); | ||
| 17 | }; | ||
| 18 | |||
| 19 | struct IoctlTwoArgTraits { | ||
| 20 | template <typename T, typename R, typename A, typename B, typename... C> | ||
| 21 | static A GetFirstArgImpl(R (T::*)(A, B, C...)); | ||
| 22 | |||
| 23 | template <typename T, typename R, typename A, typename B, typename... C> | ||
| 24 | static B GetSecondArgImpl(R (T::*)(A, B, C...)); | ||
| 25 | }; | ||
| 26 | |||
| 27 | struct Null {}; | ||
| 28 | |||
| 29 | // clang-format off | ||
| 30 | |||
| 31 | template <typename FixedArg, typename VarArg, typename InlInVarArg, typename InlOutVarArg, typename F> | ||
| 32 | NvResult WrapGeneric(F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, std::span<u8> inline_output) { | ||
| 33 | constexpr bool HasFixedArg = !std::is_same_v<FixedArg, Null>; | ||
| 34 | constexpr bool HasVarArg = !std::is_same_v<VarArg, Null>; | ||
| 35 | constexpr bool HasInlInVarArg = !std::is_same_v<InlInVarArg, Null>; | ||
| 36 | constexpr bool HasInlOutVarArg = !std::is_same_v<InlOutVarArg, Null>; | ||
| 37 | |||
| 38 | // Declare the fixed-size input value. | ||
| 39 | FixedArg fixed{}; | ||
| 40 | size_t var_offset = 0; | ||
| 41 | |||
| 42 | if constexpr (HasFixedArg) { | ||
| 43 | // Read the fixed-size input value. | ||
| 44 | var_offset = std::min(sizeof(FixedArg), input.size()); | ||
| 45 | if (var_offset > 0) { | ||
| 46 | std::memcpy(&fixed, input.data(), var_offset); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | // Read the variable-sized inputs. | ||
| 51 | const size_t num_var_args = HasVarArg ? ((input.size() - var_offset) / sizeof(VarArg)) : 0; | ||
| 52 | std::vector<VarArg> var_args(num_var_args); | ||
| 53 | if constexpr (HasVarArg) { | ||
| 54 | if (num_var_args > 0) { | ||
| 55 | std::memcpy(var_args.data(), input.data() + var_offset, num_var_args * sizeof(VarArg)); | ||
| 56 | } | ||
| 57 | } | ||
| 58 | |||
| 59 | const size_t num_inl_in_var_args = HasInlInVarArg ? (inline_input.size() / sizeof(InlInVarArg)) : 0; | ||
| 60 | std::vector<InlInVarArg> inl_in_var_args(num_inl_in_var_args); | ||
| 61 | if constexpr (HasInlInVarArg) { | ||
| 62 | if (num_inl_in_var_args > 0) { | ||
| 63 | std::memcpy(inl_in_var_args.data(), inline_input.data(), num_inl_in_var_args * sizeof(InlInVarArg)); | ||
| 64 | } | ||
| 65 | } | ||
| 66 | |||
| 67 | // Allocate the inline output buffer for the callee to fill. | ||
| 68 | const size_t num_inl_out_var_args = HasInlOutVarArg ? (inline_output.size() / sizeof(InlOutVarArg)) : 0; | ||
| 69 | std::vector<InlOutVarArg> inl_out_var_args(num_inl_out_var_args); | ||
| 70 | |||
| 71 | // Perform the call. | ||
| 72 | NvResult result = callable(fixed, var_args, inl_in_var_args, inl_out_var_args); | ||
| 73 | |||
| 74 | // Copy outputs. | ||
| 75 | if constexpr (HasFixedArg) { | ||
| 76 | if (output.size() > 0) { | ||
| 77 | std::memcpy(output.data(), &fixed, std::min(output.size(), sizeof(FixedArg))); | ||
| 78 | } | ||
| 79 | } | ||
| 80 | |||
| 81 | if constexpr (HasVarArg) { | ||
| 82 | if (num_var_args > 0 && output.size() > var_offset) { | ||
| 83 | const size_t max_var_size = output.size() - var_offset; | ||
| 84 | std::memcpy(output.data() + var_offset, var_args.data(), std::min(max_var_size, num_var_args * sizeof(VarArg))); | ||
| 85 | } | ||
| 86 | } | ||
| 87 | |||
| 88 | // Copy inline outputs. | ||
| 89 | if constexpr (HasInlOutVarArg) { | ||
| 90 | if (num_inl_out_var_args > 0) { | ||
| 91 | std::memcpy(inline_output.data(), inl_out_var_args.data(), num_inl_out_var_args * sizeof(InlOutVarArg)); | ||
| 92 | } | ||
| 93 | } | ||
| 94 | |||
| 95 | // We're done. | ||
| 96 | return result; | ||
| 97 | } | ||
| 98 | |||
| 99 | template <typename Self, typename F, typename... Rest> | ||
| 100 | NvResult WrapFixed(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) { | ||
| 101 | using FixedArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>; | ||
| 102 | |||
| 103 | const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult { | ||
| 104 | return (self->*callable)(fixed, std::forward<Rest>(rest)...); | ||
| 105 | }; | ||
| 106 | |||
| 107 | return WrapGeneric<FixedArg, Null, Null, Null>(std::move(Callable), input, {}, output, {}); | ||
| 108 | } | ||
| 109 | |||
| 110 | template <typename Self, typename F, typename... Rest> | ||
| 111 | NvResult WrapFixedInlOut(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, std::span<u8> inline_output, Rest&&... rest) { | ||
| 112 | using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>; | ||
| 113 | using InlOutVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type; | ||
| 114 | |||
| 115 | const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult { | ||
| 116 | return (self->*callable)(fixed, inl_out, std::forward<Rest>(rest)...); | ||
| 117 | }; | ||
| 118 | |||
| 119 | return WrapGeneric<FixedArg, Null, Null, InlOutVarArg>(std::move(Callable), input, {}, output, inline_output); | ||
| 120 | } | ||
| 121 | |||
| 122 | template <typename Self, typename F, typename... Rest> | ||
| 123 | NvResult WrapVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) { | ||
| 124 | using VarArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>::value_type; | ||
| 125 | |||
| 126 | const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult { | ||
| 127 | return (self->*callable)(var, std::forward<Rest>(rest)...); | ||
| 128 | }; | ||
| 129 | |||
| 130 | return WrapGeneric<Null, VarArg, Null, Null>(std::move(Callable), input, {}, output, {}); | ||
| 131 | } | ||
| 132 | |||
| 133 | template <typename Self, typename F, typename... Rest> | ||
| 134 | NvResult WrapFixedVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) { | ||
| 135 | using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>; | ||
| 136 | using VarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type; | ||
| 137 | |||
| 138 | const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult { | ||
| 139 | return (self->*callable)(fixed, var, std::forward<Rest>(rest)...); | ||
| 140 | }; | ||
| 141 | |||
| 142 | return WrapGeneric<FixedArg, VarArg, Null, Null>(std::move(Callable), input, {}, output, {}); | ||
| 143 | } | ||
| 144 | |||
| 145 | template <typename Self, typename F, typename... Rest> | ||
| 146 | NvResult WrapFixedInlIn(Self* self, F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, Rest&&... rest) { | ||
| 147 | using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>; | ||
| 148 | using InlInVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type; | ||
| 149 | |||
| 150 | const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult { | ||
| 151 | return (self->*callable)(fixed, inl_in, std::forward<Rest>(rest)...); | ||
| 152 | }; | ||
| 153 | |||
| 154 | return WrapGeneric<FixedArg, Null, InlInVarArg, Null>(std::move(Callable), input, inline_input, output, {}); | ||
| 155 | } | ||
| 156 | |||
| 157 | // clang-format on | ||
| 158 | |||
| 159 | } // namespace Service::Nvidia::Devices | ||
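The wrappers above let each handler declare its real parameter struct and have the raw byte spans marshalled automatically: the traits deduce the struct type from the member-function pointer, WrapGeneric copies the bytes in, invokes the handler, and copies the struct back out. A condensed sketch of the calling pattern the device diffs below adopt (FakeDevice, IoctlFoo, and Foo are illustrative stand-ins, not code from this change; u32 and NvResult come in through nvdevice.h):

    #include <span>

    #include "core/hle/service/nvdrv/devices/ioctl_serialization.h"

    namespace Service::Nvidia::Devices {

    // Hypothetical device: WrapFixed deduces IoctlFoo from &FakeDevice::Foo,
    // copies up to sizeof(IoctlFoo) bytes of `input` into a local struct,
    // invokes Foo, then copies the struct back into `output`.
    class FakeDevice {
    public:
        NvResult Ioctl1(std::span<const u8> input, std::span<u8> output) {
            return WrapFixed(this, &FakeDevice::Foo, input, output);
        }

    private:
        struct IoctlFoo {
            u32 value;
        };

        NvResult Foo(IoctlFoo& params) {
            params.value = 42; // lands in `output` via the wrapper's copy-out
            return NvResult::Success;
        }
    };

    } // namespace Service::Nvidia::Devices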
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 7d7bb8687..6b3639008 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "core/core.h" | 11 | #include "core/core.h" |
| 12 | #include "core/hle/service/nvdrv/core/container.h" | 12 | #include "core/hle/service/nvdrv/core/container.h" |
| 13 | #include "core/hle/service/nvdrv/core/nvmap.h" | 13 | #include "core/hle/service/nvdrv/core/nvmap.h" |
| 14 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 14 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" | 15 | #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" |
| 15 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" | 16 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" |
| 16 | #include "core/hle/service/nvdrv/nvdrv.h" | 17 | #include "core/hle/service/nvdrv/nvdrv.h" |
| @@ -33,21 +34,21 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> i | |||
| 33 | case 'A': | 34 | case 'A': |
| 34 | switch (command.cmd) { | 35 | switch (command.cmd) { |
| 35 | case 0x1: | 36 | case 0x1: |
| 36 | return BindChannel(input, output); | 37 | return WrapFixed(this, &nvhost_as_gpu::BindChannel, input, output); |
| 37 | case 0x2: | 38 | case 0x2: |
| 38 | return AllocateSpace(input, output); | 39 | return WrapFixed(this, &nvhost_as_gpu::AllocateSpace, input, output); |
| 39 | case 0x3: | 40 | case 0x3: |
| 40 | return FreeSpace(input, output); | 41 | return WrapFixed(this, &nvhost_as_gpu::FreeSpace, input, output); |
| 41 | case 0x5: | 42 | case 0x5: |
| 42 | return UnmapBuffer(input, output); | 43 | return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output); |
| 43 | case 0x6: | 44 | case 0x6: |
| 44 | return MapBufferEx(input, output); | 45 | return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output); |
| 45 | case 0x8: | 46 | case 0x8: |
| 46 | return GetVARegions(input, output); | 47 | return WrapFixed(this, &nvhost_as_gpu::GetVARegions1, input, output); |
| 47 | case 0x9: | 48 | case 0x9: |
| 48 | return AllocAsEx(input, output); | 49 | return WrapFixed(this, &nvhost_as_gpu::AllocAsEx, input, output); |
| 49 | case 0x14: | 50 | case 0x14: |
| 50 | return Remap(input, output); | 51 | return WrapVariable(this, &nvhost_as_gpu::Remap, input, output); |
| 51 | default: | 52 | default: |
| 52 | break; | 53 | break; |
| 53 | } | 54 | } |
| @@ -72,7 +73,8 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i | |||
| 72 | case 'A': | 73 | case 'A': |
| 73 | switch (command.cmd) { | 74 | switch (command.cmd) { |
| 74 | case 0x8: | 75 | case 0x8: |
| 75 | return GetVARegions(input, output, inline_output); | 76 | return WrapFixedInlOut(this, &nvhost_as_gpu::GetVARegions3, input, output, |
| 77 | inline_output); | ||
| 76 | default: | 78 | default: |
| 77 | break; | 79 | break; |
| 78 | } | 80 | } |
| @@ -87,10 +89,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i | |||
| 87 | void nvhost_as_gpu::OnOpen(DeviceFD fd) {} | 89 | void nvhost_as_gpu::OnOpen(DeviceFD fd) {} |
| 88 | void nvhost_as_gpu::OnClose(DeviceFD fd) {} | 90 | void nvhost_as_gpu::OnClose(DeviceFD fd) {} |
| 89 | 91 | ||
| 90 | NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> output) { | 92 | NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) { |
| 91 | IoctlAllocAsEx params{}; | ||
| 92 | std::memcpy(&params, input.data(), input.size()); | ||
| 93 | |||
| 94 | LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size); | 93 | LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size); |
| 95 | 94 | ||
| 96 | std::scoped_lock lock(mutex); | 95 | std::scoped_lock lock(mutex); |
| @@ -141,10 +140,7 @@ NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> outpu | |||
| 141 | return NvResult::Success; | 140 | return NvResult::Success; |
| 142 | } | 141 | } |
| 143 | 142 | ||
| 144 | NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> output) { | 143 | NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) { |
| 145 | IoctlAllocSpace params{}; | ||
| 146 | std::memcpy(&params, input.data(), input.size()); | ||
| 147 | |||
| 148 | LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, | 144 | LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, |
| 149 | params.page_size, params.flags); | 145 | params.page_size, params.flags); |
| 150 | 146 | ||
| @@ -194,7 +190,6 @@ NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> o | |||
| 194 | .big_pages = params.page_size != VM::YUZU_PAGESIZE, | 190 | .big_pages = params.page_size != VM::YUZU_PAGESIZE, |
| 195 | }; | 191 | }; |
| 196 | 192 | ||
| 197 | std::memcpy(output.data(), &params, output.size()); | ||
| 198 | return NvResult::Success; | 193 | return NvResult::Success; |
| 199 | } | 194 | } |
| 200 | 195 | ||
| @@ -222,10 +217,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) { | |||
| 222 | mapping_map.erase(offset); | 217 | mapping_map.erase(offset); |
| 223 | } | 218 | } |
| 224 | 219 | ||
| 225 | NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> output) { | 220 | NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) { |
| 226 | IoctlFreeSpace params{}; | ||
| 227 | std::memcpy(&params, input.data(), input.size()); | ||
| 228 | |||
| 229 | LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, | 221 | LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, |
| 230 | params.pages, params.page_size); | 222 | params.pages, params.page_size); |
| 231 | 223 | ||
| @@ -264,18 +256,11 @@ NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> outpu | |||
| 264 | return NvResult::BadValue; | 256 | return NvResult::BadValue; |
| 265 | } | 257 | } |
| 266 | 258 | ||
| 267 | std::memcpy(output.data(), &params, output.size()); | ||
| 268 | return NvResult::Success; | 259 | return NvResult::Success; |
| 269 | } | 260 | } |
| 270 | 261 | ||
| 271 | NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) { | 262 | NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) { |
| 272 | const auto num_entries = input.size() / sizeof(IoctlRemapEntry); | 263 | LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size()); |
| 273 | |||
| 274 | LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries); | ||
| 275 | |||
| 276 | std::scoped_lock lock(mutex); | ||
| 277 | entries.resize_destructive(num_entries); | ||
| 278 | std::memcpy(entries.data(), input.data(), input.size()); | ||
| 279 | 264 | ||
| 280 | if (!vm.initialised) { | 265 | if (!vm.initialised) { |
| 281 | return NvResult::BadValue; | 266 | return NvResult::BadValue; |
| @@ -317,14 +302,10 @@ NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) { | |||
| 317 | } | 302 | } |
| 318 | } | 303 | } |
| 319 | 304 | ||
| 320 | std::memcpy(output.data(), entries.data(), output.size()); | ||
| 321 | return NvResult::Success; | 305 | return NvResult::Success; |
| 322 | } | 306 | } |
| 323 | 307 | ||
| 324 | NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> output) { | 308 | NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { |
| 325 | IoctlMapBufferEx params{}; | ||
| 326 | std::memcpy(&params, input.data(), input.size()); | ||
| 327 | |||
| 328 | LOG_DEBUG(Service_NVDRV, | 309 | LOG_DEBUG(Service_NVDRV, |
| 329 | "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" | 310 | "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" |
| 330 | ", offset={}", | 311 | ", offset={}", |
| @@ -421,14 +402,10 @@ NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> out | |||
| 421 | mapping_map[params.offset] = mapping; | 402 | mapping_map[params.offset] = mapping; |
| 422 | } | 403 | } |
| 423 | 404 | ||
| 424 | std::memcpy(output.data(), &params, output.size()); | ||
| 425 | return NvResult::Success; | 405 | return NvResult::Success; |
| 426 | } | 406 | } |
| 427 | 407 | ||
| 428 | NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> output) { | 408 | NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) { |
| 429 | IoctlUnmapBuffer params{}; | ||
| 430 | std::memcpy(&params, input.data(), input.size()); | ||
| 431 | |||
| 432 | LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); | 409 | LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); |
| 433 | 410 | ||
| 434 | std::scoped_lock lock(mutex); | 411 | std::scoped_lock lock(mutex); |
| @@ -464,9 +441,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> out | |||
| 464 | return NvResult::Success; | 441 | return NvResult::Success; |
| 465 | } | 442 | } |
| 466 | 443 | ||
| 467 | NvResult nvhost_as_gpu::BindChannel(std::span<const u8> input, std::span<u8> output) { | 444 | NvResult nvhost_as_gpu::BindChannel(IoctlBindChannel& params) { |
| 468 | IoctlBindChannel params{}; | ||
| 469 | std::memcpy(&params, input.data(), input.size()); | ||
| 470 | LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd); | 445 | LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd); |
| 471 | 446 | ||
| 472 | auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd); | 447 | auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd); |
| @@ -493,10 +468,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) { | |||
| 493 | }; | 468 | }; |
| 494 | } | 469 | } |
| 495 | 470 | ||
| 496 | NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output) { | 471 | NvResult nvhost_as_gpu::GetVARegions1(IoctlGetVaRegions& params) { |
| 497 | IoctlGetVaRegions params{}; | ||
| 498 | std::memcpy(&params, input.data(), input.size()); | ||
| 499 | |||
| 500 | LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr, | 472 | LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr, |
| 501 | params.buf_size); | 473 | params.buf_size); |
| 502 | 474 | ||
| @@ -508,15 +480,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou | |||
| 508 | 480 | ||
| 509 | GetVARegionsImpl(params); | 481 | GetVARegionsImpl(params); |
| 510 | 482 | ||
| 511 | std::memcpy(output.data(), &params, output.size()); | ||
| 512 | return NvResult::Success; | 483 | return NvResult::Success; |
| 513 | } | 484 | } |
| 514 | 485 | ||
| 515 | NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output, | 486 | NvResult nvhost_as_gpu::GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions) { |
| 516 | std::span<u8> inline_output) { | ||
| 517 | IoctlGetVaRegions params{}; | ||
| 518 | std::memcpy(&params, input.data(), input.size()); | ||
| 519 | |||
| 520 | LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr, | 487 | LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr, |
| 521 | params.buf_size); | 488 | params.buf_size); |
| 522 | 489 | ||
| @@ -528,9 +495,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou | |||
| 528 | 495 | ||
| 529 | GetVARegionsImpl(params); | 496 | GetVARegionsImpl(params); |
| 530 | 497 | ||
| 531 | std::memcpy(output.data(), &params, output.size()); | 498 | const size_t num_regions = std::min(params.regions.size(), regions.size()); |
| 532 | std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion)); | 499 | for (size_t i = 0; i < num_regions; i++) { |
| 533 | std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion)); | 500 | regions[i] = params.regions[i]; |
| 501 | } | ||
| 534 | 502 | ||
| 535 | return NvResult::Success; | 503 | return NvResult::Success; |
| 536 | } | 504 | } |
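For the Ioctl3 path, WrapFixedInlOut hands GetVARegions3 a std::span<VaRegion> whose length is derived from the caller's inline_output byte count, so the bounded copy loop above cannot overrun either side. A sketch of that sizing rule (MakeInlineOutput is a hypothetical name; the assumption is that it matches what WrapGeneric does in the header above):

    #include <cstddef>
    #include <span>
    #include <vector>

    // Element count comes from the byte span, so a short buffer simply
    // yields fewer (possibly zero) writable elements.
    template <typename T>
    std::vector<T> MakeInlineOutput(std::span<const std::byte> inline_output) {
        return std::vector<T>(inline_output.size() / sizeof(T));
    }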
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 2af3e1260..932997e75 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | |||
| @@ -139,18 +139,17 @@ private: | |||
| 139 | static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2, | 139 | static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2, |
| 140 | "IoctlGetVaRegions is incorrect size"); | 140 | "IoctlGetVaRegions is incorrect size"); |
| 141 | 141 | ||
| 142 | NvResult AllocAsEx(std::span<const u8> input, std::span<u8> output); | 142 | NvResult AllocAsEx(IoctlAllocAsEx& params); |
| 143 | NvResult AllocateSpace(std::span<const u8> input, std::span<u8> output); | 143 | NvResult AllocateSpace(IoctlAllocSpace& params); |
| 144 | NvResult Remap(std::span<const u8> input, std::span<u8> output); | 144 | NvResult Remap(std::span<IoctlRemapEntry> params); |
| 145 | NvResult MapBufferEx(std::span<const u8> input, std::span<u8> output); | 145 | NvResult MapBufferEx(IoctlMapBufferEx& params); |
| 146 | NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output); | 146 | NvResult UnmapBuffer(IoctlUnmapBuffer& params); |
| 147 | NvResult FreeSpace(std::span<const u8> input, std::span<u8> output); | 147 | NvResult FreeSpace(IoctlFreeSpace& params); |
| 148 | NvResult BindChannel(std::span<const u8> input, std::span<u8> output); | 148 | NvResult BindChannel(IoctlBindChannel& params); |
| 149 | 149 | ||
| 150 | void GetVARegionsImpl(IoctlGetVaRegions& params); | 150 | void GetVARegionsImpl(IoctlGetVaRegions& params); |
| 151 | NvResult GetVARegions(std::span<const u8> input, std::span<u8> output); | 151 | NvResult GetVARegions1(IoctlGetVaRegions& params); |
| 152 | NvResult GetVARegions(std::span<const u8> input, std::span<u8> output, | 152 | NvResult GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions); |
| 153 | std::span<u8> inline_output); | ||
| 154 | 153 | ||
| 155 | void FreeMappingLocked(u64 offset); | 154 | void FreeMappingLocked(u64 offset); |
| 156 | 155 | ||
| @@ -213,7 +212,6 @@ private: | |||
| 213 | bool initialised{}; | 212 | bool initialised{}; |
| 214 | } vm; | 213 | } vm; |
| 215 | std::shared_ptr<Tegra::MemoryManager> gmmu; | 214 | std::shared_ptr<Tegra::MemoryManager> gmmu; |
| 216 | Common::ScratchBuffer<IoctlRemapEntry> entries; | ||
| 217 | 215 | ||
| 218 | // s32 channel{}; | 216 | // s32 channel{}; |
| 219 | // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE}; | 217 | // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE}; |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index 4d55554b4..b8dd34e24 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include "core/hle/kernel/k_event.h" | 14 | #include "core/hle/kernel/k_event.h" |
| 15 | #include "core/hle/service/nvdrv/core/container.h" | 15 | #include "core/hle/service/nvdrv/core/container.h" |
| 16 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | 16 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" |
| 17 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 17 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" | 18 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" |
| 18 | #include "video_core/gpu.h" | 19 | #include "video_core/gpu.h" |
| 19 | #include "video_core/host1x/host1x.h" | 20 | #include "video_core/host1x/host1x.h" |
| @@ -40,19 +41,19 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inp | |||
| 40 | case 0x0: | 41 | case 0x0: |
| 41 | switch (command.cmd) { | 42 | switch (command.cmd) { |
| 42 | case 0x1b: | 43 | case 0x1b: |
| 43 | return NvOsGetConfigU32(input, output); | 44 | return WrapFixed(this, &nvhost_ctrl::NvOsGetConfigU32, input, output); |
| 44 | case 0x1c: | 45 | case 0x1c: |
| 45 | return IocCtrlClearEventWait(input, output); | 46 | return WrapFixed(this, &nvhost_ctrl::IocCtrlClearEventWait, input, output); |
| 46 | case 0x1d: | 47 | case 0x1d: |
| 47 | return IocCtrlEventWait(input, output, true); | 48 | return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, true); |
| 48 | case 0x1e: | 49 | case 0x1e: |
| 49 | return IocCtrlEventWait(input, output, false); | 50 | return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, false); |
| 50 | case 0x1f: | 51 | case 0x1f: |
| 51 | return IocCtrlEventRegister(input, output); | 52 | return WrapFixed(this, &nvhost_ctrl::IocCtrlEventRegister, input, output); |
| 52 | case 0x20: | 53 | case 0x20: |
| 53 | return IocCtrlEventUnregister(input, output); | 54 | return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregister, input, output); |
| 54 | case 0x21: | 55 | case 0x21: |
| 55 | return IocCtrlEventUnregisterBatch(input, output); | 56 | return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregisterBatch, input, output); |
| 56 | } | 57 | } |
| 57 | break; | 58 | break; |
| 58 | default: | 59 | default: |
| @@ -79,25 +80,19 @@ void nvhost_ctrl::OnOpen(DeviceFD fd) {} | |||
| 79 | 80 | ||
| 80 | void nvhost_ctrl::OnClose(DeviceFD fd) {} | 81 | void nvhost_ctrl::OnClose(DeviceFD fd) {} |
| 81 | 82 | ||
| 82 | NvResult nvhost_ctrl::NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output) { | 83 | NvResult nvhost_ctrl::NvOsGetConfigU32(IocGetConfigParams& params) { |
| 83 | IocGetConfigParams params{}; | ||
| 84 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 85 | LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(), | 84 | LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(), |
| 86 | params.param_str.data()); | 85 | params.param_str.data()); |
| 87 | return NvResult::ConfigVarNotFound; // Returns an error in production mode | 86 | return NvResult::ConfigVarNotFound; // Returns an error in production mode |
| 88 | } | 87 | } |
| 89 | 88 | ||
| 90 | NvResult nvhost_ctrl::IocCtrlEventWait(std::span<const u8> input, std::span<u8> output, | 89 | NvResult nvhost_ctrl::IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation) { |
| 91 | bool is_allocation) { | ||
| 92 | IocCtrlEventWaitParams params{}; | ||
| 93 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 94 | LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}", | 90 | LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}", |
| 95 | params.fence.id, params.fence.value, params.timeout, is_allocation); | 91 | params.fence.id, params.fence.value, params.timeout, is_allocation); |
| 96 | 92 | ||
| 97 | bool must_unmark_fail = !is_allocation; | 93 | bool must_unmark_fail = !is_allocation; |
| 98 | const u32 event_id = params.value.raw; | 94 | const u32 event_id = params.value.raw; |
| 99 | SCOPE_EXIT({ | 95 | SCOPE_EXIT({ |
| 100 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 101 | if (must_unmark_fail) { | 96 | if (must_unmark_fail) { |
| 102 | events[event_id].fails = 0; | 97 | events[event_id].fails = 0; |
| 103 | } | 98 | } |
| @@ -231,9 +226,7 @@ NvResult nvhost_ctrl::FreeEvent(u32 slot) { | |||
| 231 | return NvResult::Success; | 226 | return NvResult::Success; |
| 232 | } | 227 | } |
| 233 | 228 | ||
| 234 | NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output) { | 229 | NvResult nvhost_ctrl::IocCtrlEventRegister(IocCtrlEventRegisterParams& params) { |
| 235 | IocCtrlEventRegisterParams params{}; | ||
| 236 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 237 | const u32 event_id = params.user_event_id; | 230 | const u32 event_id = params.user_event_id; |
| 238 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); | 231 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); |
| 239 | if (event_id >= MaxNvEvents) { | 232 | if (event_id >= MaxNvEvents) { |
| @@ -252,9 +245,7 @@ NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span< | |||
| 252 | return NvResult::Success; | 245 | return NvResult::Success; |
| 253 | } | 246 | } |
| 254 | 247 | ||
| 255 | NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output) { | 248 | NvResult nvhost_ctrl::IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params) { |
| 256 | IocCtrlEventUnregisterParams params{}; | ||
| 257 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 258 | const u32 event_id = params.user_event_id & 0x00FF; | 249 | const u32 event_id = params.user_event_id & 0x00FF; |
| 259 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); | 250 | LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); |
| 260 | 251 | ||
| @@ -262,9 +253,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::spa | |||
| 262 | return FreeEvent(event_id); | 253 | return FreeEvent(event_id); |
| 263 | } | 254 | } |
| 264 | 255 | ||
| 265 | NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output) { | 256 | NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params) { |
| 266 | IocCtrlEventUnregisterBatchParams params{}; | ||
| 267 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 268 | u64 event_mask = params.user_events; | 257 | u64 event_mask = params.user_events; |
| 269 | LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask); | 258 | LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask); |
| 270 | 259 | ||
| @@ -280,10 +269,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std | |||
| 280 | return NvResult::Success; | 269 | return NvResult::Success; |
| 281 | } | 270 | } |
| 282 | 271 | ||
| 283 | NvResult nvhost_ctrl::IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output) { | 272 | NvResult nvhost_ctrl::IocCtrlClearEventWait(IocCtrlEventClearParams& params) { |
| 284 | IocCtrlEventClearParams params{}; | ||
| 285 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 286 | |||
| 287 | u32 event_id = params.event_id.slot; | 273 | u32 event_id = params.event_id.slot; |
| 288 | LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id); | 274 | LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id); |
| 289 | 275 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 2efed4862..992124b60 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | |||
| @@ -186,12 +186,12 @@ private: | |||
| 186 | static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8, | 186 | static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8, |
| 187 | "IocCtrlEventKill is incorrect size"); | 187 | "IocCtrlEventKill is incorrect size"); |
| 188 | 188 | ||
| 189 | NvResult NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output); | 189 | NvResult NvOsGetConfigU32(IocGetConfigParams& params); |
| 190 | NvResult IocCtrlEventWait(std::span<const u8> input, std::span<u8> output, bool is_allocation); | 190 | NvResult IocCtrlEventRegister(IocCtrlEventRegisterParams& params); |
| 191 | NvResult IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output); | 191 | NvResult IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params); |
| 192 | NvResult IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output); | 192 | NvResult IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params); |
| 193 | NvResult IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output); | 193 | NvResult IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation); |
| 194 | NvResult IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output); | 194 | NvResult IocCtrlClearEventWait(IocCtrlEventClearParams& params); |
| 195 | 195 | ||
| 196 | NvResult FreeEvent(u32 slot); | 196 | NvResult FreeEvent(u32 slot); |
| 197 | 197 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index 6081d92e9..61a2df121 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/core_timing.h" | 8 | #include "core/core_timing.h" |
| 9 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 9 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" | 10 | #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" |
| 10 | #include "core/hle/service/nvdrv/nvdrv.h" | 11 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 11 | 12 | ||
| @@ -27,23 +28,23 @@ NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> | |||
| 27 | case 'G': | 28 | case 'G': |
| 28 | switch (command.cmd) { | 29 | switch (command.cmd) { |
| 29 | case 0x1: | 30 | case 0x1: |
| 30 | return ZCullGetCtxSize(input, output); | 31 | return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetCtxSize, input, output); |
| 31 | case 0x2: | 32 | case 0x2: |
| 32 | return ZCullGetInfo(input, output); | 33 | return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetInfo, input, output); |
| 33 | case 0x3: | 34 | case 0x3: |
| 34 | return ZBCSetTable(input, output); | 35 | return WrapFixed(this, &nvhost_ctrl_gpu::ZBCSetTable, input, output); |
| 35 | case 0x4: | 36 | case 0x4: |
| 36 | return ZBCQueryTable(input, output); | 37 | return WrapFixed(this, &nvhost_ctrl_gpu::ZBCQueryTable, input, output); |
| 37 | case 0x5: | 38 | case 0x5: |
| 38 | return GetCharacteristics(input, output); | 39 | return WrapFixed(this, &nvhost_ctrl_gpu::GetCharacteristics1, input, output); |
| 39 | case 0x6: | 40 | case 0x6: |
| 40 | return GetTPCMasks(input, output); | 41 | return WrapFixed(this, &nvhost_ctrl_gpu::GetTPCMasks1, input, output); |
| 41 | case 0x7: | 42 | case 0x7: |
| 42 | return FlushL2(input, output); | 43 | return WrapFixed(this, &nvhost_ctrl_gpu::FlushL2, input, output); |
| 43 | case 0x14: | 44 | case 0x14: |
| 44 | return GetActiveSlotMask(input, output); | 45 | return WrapFixed(this, &nvhost_ctrl_gpu::GetActiveSlotMask, input, output); |
| 45 | case 0x1c: | 46 | case 0x1c: |
| 46 | return GetGpuTime(input, output); | 47 | return WrapFixed(this, &nvhost_ctrl_gpu::GetGpuTime, input, output); |
| 47 | default: | 48 | default: |
| 48 | break; | 49 | break; |
| 49 | } | 50 | } |
| @@ -65,9 +66,11 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> | |||
| 65 | case 'G': | 66 | case 'G': |
| 66 | switch (command.cmd) { | 67 | switch (command.cmd) { |
| 67 | case 0x5: | 68 | case 0x5: |
| 68 | return GetCharacteristics(input, output, inline_output); | 69 | return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetCharacteristics3, input, output, |
| 70 | inline_output); | ||
| 69 | case 0x6: | 71 | case 0x6: |
| 70 | return GetTPCMasks(input, output, inline_output); | 72 | return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetTPCMasks3, input, output, |
| 73 | inline_output); | ||
| 71 | default: | 74 | default: |
| 72 | break; | 75 | break; |
| 73 | } | 76 | } |
| @@ -82,10 +85,8 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> | |||
| 82 | void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {} | 85 | void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {} |
| 83 | void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {} | 86 | void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {} |
| 84 | 87 | ||
| 85 | NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output) { | 88 | NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) { |
| 86 | LOG_DEBUG(Service_NVDRV, "called"); | 89 | LOG_DEBUG(Service_NVDRV, "called"); |
| 87 | IoctlCharacteristics params{}; | ||
| 88 | std::memcpy(&params, input.data(), input.size()); | ||
| 89 | params.gc.arch = 0x120; | 90 | params.gc.arch = 0x120; |
| 90 | params.gc.impl = 0xb; | 91 | params.gc.impl = 0xb; |
| 91 | params.gc.rev = 0xa1; | 92 | params.gc.rev = 0xa1; |
| @@ -123,15 +124,13 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa | |||
| 123 | params.gc.gr_compbit_store_base_hw = 0x0; | 124 | params.gc.gr_compbit_store_base_hw = 0x0; |
| 124 | params.gpu_characteristics_buf_size = 0xA0; | 125 | params.gpu_characteristics_buf_size = 0xA0; |
| 125 | params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED) | 126 | params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED) |
| 126 | std::memcpy(output.data(), &params, output.size()); | ||
| 127 | return NvResult::Success; | 127 | return NvResult::Success; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output, | 130 | NvResult nvhost_ctrl_gpu::GetCharacteristics3( |
| 131 | std::span<u8> inline_output) { | 131 | IoctlCharacteristics& params, std::span<IoctlGpuCharacteristics> gpu_characteristics) { |
| 132 | LOG_DEBUG(Service_NVDRV, "called"); | 132 | LOG_DEBUG(Service_NVDRV, "called"); |
| 133 | IoctlCharacteristics params{}; | 133 | |
| 134 | std::memcpy(&params, input.data(), input.size()); | ||
| 135 | params.gc.arch = 0x120; | 134 | params.gc.arch = 0x120; |
| 136 | params.gc.impl = 0xb; | 135 | params.gc.impl = 0xb; |
| 137 | params.gc.rev = 0xa1; | 136 | params.gc.rev = 0xa1; |
| @@ -169,70 +168,47 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa | |||
| 169 | params.gc.gr_compbit_store_base_hw = 0x0; | 168 | params.gc.gr_compbit_store_base_hw = 0x0; |
| 170 | params.gpu_characteristics_buf_size = 0xA0; | 169 | params.gpu_characteristics_buf_size = 0xA0; |
| 171 | params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED) | 170 | params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED) |
| 172 | 171 | if (!gpu_characteristics.empty()) { | |
| 173 | std::memcpy(output.data(), &params, output.size()); | 172 | gpu_characteristics.front() = params.gc; |
| 174 | std::memcpy(inline_output.data(), &params.gc, inline_output.size()); | 173 | } |
| 175 | return NvResult::Success; | 174 | return NvResult::Success; |
| 176 | } | 175 | } |
| 177 | 176 | ||
| 178 | NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output) { | 177 | NvResult nvhost_ctrl_gpu::GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params) { |
| 179 | IoctlGpuGetTpcMasksArgs params{}; | ||
| 180 | std::memcpy(&params, input.data(), input.size()); | ||
| 181 | LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size); | 178 | LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size); |
| 182 | if (params.mask_buffer_size != 0) { | 179 | if (params.mask_buffer_size != 0) { |
| 183 | params.tcp_mask = 3; | 180 | params.tcp_mask = 3; |
| 184 | } | 181 | } |
| 185 | std::memcpy(output.data(), &params, output.size()); | ||
| 186 | return NvResult::Success; | 182 | return NvResult::Success; |
| 187 | } | 183 | } |
| 188 | 184 | ||
| 189 | NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output, | 185 | NvResult nvhost_ctrl_gpu::GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask) { |
| 190 | std::span<u8> inline_output) { | ||
| 191 | IoctlGpuGetTpcMasksArgs params{}; | ||
| 192 | std::memcpy(&params, input.data(), input.size()); | ||
| 193 | LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size); | 186 | LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size); |
| 194 | if (params.mask_buffer_size != 0) { | 187 | if (params.mask_buffer_size != 0) { |
| 195 | params.tcp_mask = 3; | 188 | params.tcp_mask = 3; |
| 196 | } | 189 | } |
| 197 | std::memcpy(output.data(), &params, output.size()); | 190 | if (!tpc_mask.empty()) { |
| 198 | std::memcpy(inline_output.data(), &params.tcp_mask, inline_output.size()); | 191 | tpc_mask.front() = params.tcp_mask; |
| 192 | } | ||
| 199 | return NvResult::Success; | 193 | return NvResult::Success; |
| 200 | } | 194 | } |
| 201 | 195 | ||
| 202 | NvResult nvhost_ctrl_gpu::GetActiveSlotMask(std::span<const u8> input, std::span<u8> output) { | 196 | NvResult nvhost_ctrl_gpu::GetActiveSlotMask(IoctlActiveSlotMask& params) { |
| 203 | LOG_DEBUG(Service_NVDRV, "called"); | 197 | LOG_DEBUG(Service_NVDRV, "called"); |
| 204 | 198 | ||
| 205 | IoctlActiveSlotMask params{}; | ||
| 206 | if (input.size() > 0) { | ||
| 207 | std::memcpy(&params, input.data(), input.size()); | ||
| 208 | } | ||
| 209 | params.slot = 0x07; | 199 | params.slot = 0x07; |
| 210 | params.mask = 0x01; | 200 | params.mask = 0x01; |
| 211 | std::memcpy(output.data(), &params, output.size()); | ||
| 212 | return NvResult::Success; | 201 | return NvResult::Success; |
| 213 | } | 202 | } |
| 214 | 203 | ||
| 215 | NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output) { | 204 | NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(IoctlZcullGetCtxSize& params) { |
| 216 | LOG_DEBUG(Service_NVDRV, "called"); | 205 | LOG_DEBUG(Service_NVDRV, "called"); |
| 217 | |||
| 218 | IoctlZcullGetCtxSize params{}; | ||
| 219 | if (input.size() > 0) { | ||
| 220 | std::memcpy(&params, input.data(), input.size()); | ||
| 221 | } | ||
| 222 | params.size = 0x1; | 206 | params.size = 0x1; |
| 223 | std::memcpy(output.data(), &params, output.size()); | ||
| 224 | return NvResult::Success; | 207 | return NvResult::Success; |
| 225 | } | 208 | } |
| 226 | 209 | ||
| 227 | NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8> output) { | 210 | NvResult nvhost_ctrl_gpu::ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params) { |
| 228 | LOG_DEBUG(Service_NVDRV, "called"); | 211 | LOG_DEBUG(Service_NVDRV, "called"); |
| 229 | |||
| 230 | IoctlNvgpuGpuZcullGetInfoArgs params{}; | ||
| 231 | |||
| 232 | if (input.size() > 0) { | ||
| 233 | std::memcpy(&params, input.data(), input.size()); | ||
| 234 | } | ||
| 235 | |||
| 236 | params.width_align_pixels = 0x20; | 212 | params.width_align_pixels = 0x20; |
| 237 | params.height_align_pixels = 0x20; | 213 | params.height_align_pixels = 0x20; |
| 238 | params.pixel_squares_by_aliquots = 0x400; | 214 | params.pixel_squares_by_aliquots = 0x400; |
| @@ -243,53 +219,28 @@ NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8> | |||
| 243 | params.subregion_width_align_pixels = 0x20; | 219 | params.subregion_width_align_pixels = 0x20; |
| 244 | params.subregion_height_align_pixels = 0x40; | 220 | params.subregion_height_align_pixels = 0x40; |
| 245 | params.subregion_count = 0x10; | 221 | params.subregion_count = 0x10; |
| 246 | std::memcpy(output.data(), &params, output.size()); | ||
| 247 | return NvResult::Success; | 222 | return NvResult::Success; |
| 248 | } | 223 | } |
| 249 | 224 | ||
| 250 | NvResult nvhost_ctrl_gpu::ZBCSetTable(std::span<const u8> input, std::span<u8> output) { | 225 | NvResult nvhost_ctrl_gpu::ZBCSetTable(IoctlZbcSetTable& params) { |
| 251 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); | 226 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); |
| 252 | |||
| 253 | IoctlZbcSetTable params{}; | ||
| 254 | std::memcpy(&params, input.data(), input.size()); | ||
| 255 | // TODO(ogniK): What does this even actually do? | 227 | // TODO(ogniK): What does this even actually do? |
| 256 | |||
| 257 | // Prevent null pointer being passed as arg 1 | ||
| 258 | if (output.empty()) { | ||
| 259 | LOG_WARNING(Service_NVDRV, "Avoiding passing null pointer to memcpy"); | ||
| 260 | } else { | ||
| 261 | std::memcpy(output.data(), &params, output.size()); | ||
| 262 | } | ||
| 263 | return NvResult::Success; | 228 | return NvResult::Success; |
| 264 | } | 229 | } |
| 265 | 230 | ||
| 266 | NvResult nvhost_ctrl_gpu::ZBCQueryTable(std::span<const u8> input, std::span<u8> output) { | 231 | NvResult nvhost_ctrl_gpu::ZBCQueryTable(IoctlZbcQueryTable& params) { |
| 267 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); | 232 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); |
| 268 | |||
| 269 | IoctlZbcQueryTable params{}; | ||
| 270 | std::memcpy(&params, input.data(), input.size()); | ||
| 271 | // TODO : To implement properly | ||
| 272 | std::memcpy(output.data(), &params, output.size()); | ||
| 273 | return NvResult::Success; | 233 | return NvResult::Success; |
| 274 | } | 234 | } |
| 275 | 235 | ||
| 276 | NvResult nvhost_ctrl_gpu::FlushL2(std::span<const u8> input, std::span<u8> output) { | 236 | NvResult nvhost_ctrl_gpu::FlushL2(IoctlFlushL2& params) { |
| 277 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); | 237 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); |
| 278 | |||
| 279 | IoctlFlushL2 params{}; | ||
| 280 | std::memcpy(&params, input.data(), input.size()); | ||
| 281 | // TODO : To implement properly | ||
| 282 | std::memcpy(output.data(), &params, output.size()); | ||
| 283 | return NvResult::Success; | 238 | return NvResult::Success; |
| 284 | } | 239 | } |
| 285 | 240 | ||
| 286 | NvResult nvhost_ctrl_gpu::GetGpuTime(std::span<const u8> input, std::span<u8> output) { | 241 | NvResult nvhost_ctrl_gpu::GetGpuTime(IoctlGetGpuTime& params) { |
| 287 | LOG_DEBUG(Service_NVDRV, "called"); | 242 | LOG_DEBUG(Service_NVDRV, "called"); |
| 288 | |||
| 289 | IoctlGetGpuTime params{}; | ||
| 290 | std::memcpy(&params, input.data(), input.size()); | ||
| 291 | params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count()); | 243 | params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count()); |
| 292 | std::memcpy(output.data(), &params, output.size()); | ||
| 293 | return NvResult::Success; | 244 | return NvResult::Success; |
| 294 | } | 245 | } |
| 295 | 246 | ||
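The pattern repeated through this file is the real payoff of the new wrappers: the removed memcpy calls copied inline_output.size() bytes out of a single struct member, reading past that member whenever the caller's buffer was larger than it, whereas writing through a typed span is clamped to whole elements by construction. Schematically (a hedged sketch, not code from the change; WriteFirst is a hypothetical helper):

    #include <span>

    // Write at most one element, and only if the caller gave us room.
    template <typename T>
    void WriteFirst(std::span<T> out, const T& value) {
        if (!out.empty()) {
            out.front() = value;
        }
    }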
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h index 97995551c..d170299bd 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | |||
| @@ -151,21 +151,20 @@ private: | |||
| 151 | }; | 151 | }; |
| 152 | static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size"); | 152 | static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size"); |
| 153 | 153 | ||
| 154 | NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output); | 154 | NvResult GetCharacteristics1(IoctlCharacteristics& params); |
| 155 | NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output, | 155 | NvResult GetCharacteristics3(IoctlCharacteristics& params, |
| 156 | std::span<u8> inline_output); | 156 | std::span<IoctlGpuCharacteristics> gpu_characteristics); |
| 157 | 157 | ||
| 158 | NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output); | 158 | NvResult GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params); |
| 159 | NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output, | 159 | NvResult GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask); |
| 160 | std::span<u8> inline_output); | 160 | |
| 161 | 161 | NvResult GetActiveSlotMask(IoctlActiveSlotMask& params); | |
| 162 | NvResult GetActiveSlotMask(std::span<const u8> input, std::span<u8> output); | 162 | NvResult ZCullGetCtxSize(IoctlZcullGetCtxSize& params); |
| 163 | NvResult ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output); | 163 | NvResult ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params); |
| 164 | NvResult ZCullGetInfo(std::span<const u8> input, std::span<u8> output); | 164 | NvResult ZBCSetTable(IoctlZbcSetTable& params); |
| 165 | NvResult ZBCSetTable(std::span<const u8> input, std::span<u8> output); | 165 | NvResult ZBCQueryTable(IoctlZbcQueryTable& params); |
| 166 | NvResult ZBCQueryTable(std::span<const u8> input, std::span<u8> output); | 166 | NvResult FlushL2(IoctlFlushL2& params); |
| 167 | NvResult FlushL2(std::span<const u8> input, std::span<u8> output); | 167 | NvResult GetGpuTime(IoctlGetGpuTime& params); |
| 168 | NvResult GetGpuTime(std::span<const u8> input, std::span<u8> output); | ||
| 169 | 168 | ||
| 170 | EventInterface& events_interface; | 169 | EventInterface& events_interface; |
| 171 | 170 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 46a25fcab..b0395c2f0 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "core/hle/service/nvdrv/core/container.h" | 8 | #include "core/hle/service/nvdrv/core/container.h" |
| 9 | #include "core/hle/service/nvdrv/core/nvmap.h" | 9 | #include "core/hle/service/nvdrv/core/nvmap.h" |
| 10 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" | 10 | #include "core/hle/service/nvdrv/core/syncpoint_manager.h" |
| 11 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 11 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" | 12 | #include "core/hle/service/nvdrv/devices/nvhost_gpu.h" |
| 12 | #include "core/hle/service/nvdrv/nvdrv.h" | 13 | #include "core/hle/service/nvdrv/nvdrv.h" |
| 13 | #include "core/memory.h" | 14 | #include "core/memory.h" |
| @@ -52,7 +53,7 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 52 | case 0x0: | 53 | case 0x0: |
| 53 | switch (command.cmd) { | 54 | switch (command.cmd) { |
| 54 | case 0x3: | 55 | case 0x3: |
| 55 | return GetWaitbase(input, output); | 56 | return WrapFixed(this, &nvhost_gpu::GetWaitbase, input, output); |
| 56 | default: | 57 | default: |
| 57 | break; | 58 | break; |
| 58 | } | 59 | } |
| @@ -60,25 +61,25 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 60 | case 'H': | 61 | case 'H': |
| 61 | switch (command.cmd) { | 62 | switch (command.cmd) { |
| 62 | case 0x1: | 63 | case 0x1: |
| 63 | return SetNVMAPfd(input, output); | 64 | return WrapFixed(this, &nvhost_gpu::SetNVMAPfd, input, output); |
| 64 | case 0x3: | 65 | case 0x3: |
| 65 | return ChannelSetTimeout(input, output); | 66 | return WrapFixed(this, &nvhost_gpu::ChannelSetTimeout, input, output); |
| 66 | case 0x8: | 67 | case 0x8: |
| 67 | return SubmitGPFIFOBase(input, output, false); | 68 | return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, false); |
| 68 | case 0x9: | 69 | case 0x9: |
| 69 | return AllocateObjectContext(input, output); | 70 | return WrapFixed(this, &nvhost_gpu::AllocateObjectContext, input, output); |
| 70 | case 0xb: | 71 | case 0xb: |
| 71 | return ZCullBind(input, output); | 72 | return WrapFixed(this, &nvhost_gpu::ZCullBind, input, output); |
| 72 | case 0xc: | 73 | case 0xc: |
| 73 | return SetErrorNotifier(input, output); | 74 | return WrapFixed(this, &nvhost_gpu::SetErrorNotifier, input, output); |
| 74 | case 0xd: | 75 | case 0xd: |
| 75 | return SetChannelPriority(input, output); | 76 | return WrapFixed(this, &nvhost_gpu::SetChannelPriority, input, output); |
| 76 | case 0x1a: | 77 | case 0x1a: |
| 77 | return AllocGPFIFOEx2(input, output); | 78 | return WrapFixed(this, &nvhost_gpu::AllocGPFIFOEx2, input, output); |
| 78 | case 0x1b: | 79 | case 0x1b: |
| 79 | return SubmitGPFIFOBase(input, output, true); | 80 | return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, true); |
| 80 | case 0x1d: | 81 | case 0x1d: |
| 81 | return ChannelSetTimeslice(input, output); | 82 | return WrapFixed(this, &nvhost_gpu::ChannelSetTimeslice, input, output); |
| 82 | default: | 83 | default: |
| 83 | break; | 84 | break; |
| 84 | } | 85 | } |
| @@ -86,9 +87,9 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 86 | case 'G': | 87 | case 'G': |
| 87 | switch (command.cmd) { | 88 | switch (command.cmd) { |
| 88 | case 0x14: | 89 | case 0x14: |
| 89 | return SetClientData(input, output); | 90 | return WrapFixed(this, &nvhost_gpu::SetClientData, input, output); |
| 90 | case 0x15: | 91 | case 0x15: |
| 91 | return GetClientData(input, output); | 92 | return WrapFixed(this, &nvhost_gpu::GetClientData, input, output); |
| 92 | default: | 93 | default: |
| 93 | break; | 94 | break; |
| 94 | } | 95 | } |
| @@ -104,7 +105,8 @@ NvResult nvhost_gpu::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 104 | case 'H': | 105 | case 'H': |
| 105 | switch (command.cmd) { | 106 | switch (command.cmd) { |
| 106 | case 0x1b: | 107 | case 0x1b: |
| 107 | return SubmitGPFIFOBase(input, inline_input, output); | 108 | return WrapFixedInlIn(this, &nvhost_gpu::SubmitGPFIFOBase2, input, inline_input, |
| 109 | output); | ||
| 108 | } | 110 | } |
| 109 | break; | 111 | break; |
| 110 | } | 112 | } |
| @@ -121,63 +123,45 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 121 | void nvhost_gpu::OnOpen(DeviceFD fd) {} | 123 | void nvhost_gpu::OnOpen(DeviceFD fd) {} |
| 122 | void nvhost_gpu::OnClose(DeviceFD fd) {} | 124 | void nvhost_gpu::OnClose(DeviceFD fd) {} |
| 123 | 125 | ||
| 124 | NvResult nvhost_gpu::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) { | 126 | NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) { |
| 125 | IoctlSetNvmapFD params{}; | ||
| 126 | std::memcpy(&params, input.data(), input.size()); | ||
| 127 | LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd); | 127 | LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd); |
| 128 | 128 | ||
| 129 | nvmap_fd = params.nvmap_fd; | 129 | nvmap_fd = params.nvmap_fd; |
| 130 | return NvResult::Success; | 130 | return NvResult::Success; |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | NvResult nvhost_gpu::SetClientData(std::span<const u8> input, std::span<u8> output) { | 133 | NvResult nvhost_gpu::SetClientData(IoctlClientData& params) { |
| 134 | LOG_DEBUG(Service_NVDRV, "called"); | 134 | LOG_DEBUG(Service_NVDRV, "called"); |
| 135 | |||
| 136 | IoctlClientData params{}; | ||
| 137 | std::memcpy(&params, input.data(), input.size()); | ||
| 138 | user_data = params.data; | 135 | user_data = params.data; |
| 139 | return NvResult::Success; | 136 | return NvResult::Success; |
| 140 | } | 137 | } |
| 141 | 138 | ||
| 142 | NvResult nvhost_gpu::GetClientData(std::span<const u8> input, std::span<u8> output) { | 139 | NvResult nvhost_gpu::GetClientData(IoctlClientData& params) { |
| 143 | LOG_DEBUG(Service_NVDRV, "called"); | 140 | LOG_DEBUG(Service_NVDRV, "called"); |
| 144 | |||
| 145 | IoctlClientData params{}; | ||
| 146 | std::memcpy(&params, input.data(), input.size()); | ||
| 147 | params.data = user_data; | 141 | params.data = user_data; |
| 148 | std::memcpy(output.data(), &params, output.size()); | ||
| 149 | return NvResult::Success; | 142 | return NvResult::Success; |
| 150 | } | 143 | } |
| 151 | 144 | ||
| 152 | NvResult nvhost_gpu::ZCullBind(std::span<const u8> input, std::span<u8> output) { | 145 | NvResult nvhost_gpu::ZCullBind(IoctlZCullBind& params) { |
| 153 | std::memcpy(&zcull_params, input.data(), input.size()); | 146 | zcull_params = params; |
| 154 | LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va, | 147 | LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va, |
| 155 | zcull_params.mode); | 148 | zcull_params.mode); |
| 156 | |||
| 157 | std::memcpy(output.data(), &zcull_params, output.size()); | ||
| 158 | return NvResult::Success; | 149 | return NvResult::Success; |
| 159 | } | 150 | } |
| 160 | 151 | ||
| 161 | NvResult nvhost_gpu::SetErrorNotifier(std::span<const u8> input, std::span<u8> output) { | 152 | NvResult nvhost_gpu::SetErrorNotifier(IoctlSetErrorNotifier& params) { |
| 162 | IoctlSetErrorNotifier params{}; | ||
| 163 | std::memcpy(&params, input.data(), input.size()); | ||
| 164 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset, | 153 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset, |
| 165 | params.size, params.mem); | 154 | params.size, params.mem); |
| 166 | |||
| 167 | std::memcpy(output.data(), &params, output.size()); | ||
| 168 | return NvResult::Success; | 155 | return NvResult::Success; |
| 169 | } | 156 | } |
| 170 | 157 | ||
| 171 | NvResult nvhost_gpu::SetChannelPriority(std::span<const u8> input, std::span<u8> output) { | 158 | NvResult nvhost_gpu::SetChannelPriority(IoctlChannelSetPriority& params) { |
| 172 | std::memcpy(&channel_priority, input.data(), input.size()); | 159 | channel_priority = params.priority; |
| 173 | LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority); | 160 | LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority); |
| 174 | |||
| 175 | return NvResult::Success; | 161 | return NvResult::Success; |
| 176 | } | 162 | } |
| 177 | 163 | ||
| 178 | NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output) { | 164 | NvResult nvhost_gpu::AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params) { |
| 179 | IoctlAllocGpfifoEx2 params{}; | ||
| 180 | std::memcpy(&params, input.data(), input.size()); | ||
| 181 | LOG_WARNING(Service_NVDRV, | 165 | LOG_WARNING(Service_NVDRV, |
| 182 | "(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, " | 166 | "(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, " |
| 183 | "unk1={:X}, unk2={:X}, unk3={:X}", | 167 | "unk1={:X}, unk2={:X}, unk3={:X}", |
| @@ -193,18 +177,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> out | |||
| 193 | 177 | ||
| 194 | params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint); | 178 | params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint); |
| 195 | 179 | ||
| 196 | std::memcpy(output.data(), &params, output.size()); | ||
| 197 | return NvResult::Success; | 180 | return NvResult::Success; |
| 198 | } | 181 | } |
| 199 | 182 | ||
| 200 | NvResult nvhost_gpu::AllocateObjectContext(std::span<const u8> input, std::span<u8> output) { | 183 | NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) { |
| 201 | IoctlAllocObjCtx params{}; | ||
| 202 | std::memcpy(&params, input.data(), input.size()); | ||
| 203 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num, | 184 | LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num, |
| 204 | params.flags); | 185 | params.flags); |
| 205 | 186 | ||
| 206 | params.obj_id = 0x0; | 187 | params.obj_id = 0x0; |
| 207 | std::memcpy(output.data(), &params, output.size()); | ||
| 208 | return NvResult::Success; | 188 | return NvResult::Success; |
| 209 | } | 189 | } |
| 210 | 190 | ||
| @@ -248,8 +228,7 @@ static boost::container::small_vector<Tegra::CommandHeader, 512> BuildIncrementW | |||
| 248 | return result; | 228 | return result; |
| 249 | } | 229 | } |
| 250 | 230 | ||
| 251 | NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output, | 231 | NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries) { |
| 252 | Tegra::CommandList&& entries) { | ||
| 253 | LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address, | 232 | LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address, |
| 254 | params.num_entries, params.flags.raw); | 233 | params.num_entries, params.flags.raw); |
| 255 | 234 | ||
| @@ -290,65 +269,55 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> o | |||
| 290 | 269 | ||
| 291 | flags.raw = 0; | 270 | flags.raw = 0; |
| 292 | 271 | ||
| 293 | std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo)); | ||
| 294 | return NvResult::Success; | 272 | return NvResult::Success; |
| 295 | } | 273 | } |
| 296 | 274 | ||
| 297 | NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output, | 275 | NvResult nvhost_gpu::SubmitGPFIFOBase1(IoctlSubmitGpfifo& params, |
| 298 | bool kickoff) { | 276 | std::span<Tegra::CommandListHeader> commands, bool kickoff) { |
| 299 | if (input.size() < sizeof(IoctlSubmitGpfifo)) { | 277 | if (params.num_entries > commands.size()) { |
| 300 | UNIMPLEMENTED(); | 278 | UNIMPLEMENTED(); |
| 301 | return NvResult::InvalidSize; | 279 | return NvResult::InvalidSize; |
| 302 | } | 280 | } |
| 303 | IoctlSubmitGpfifo params{}; | ||
| 304 | std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo)); | ||
| 305 | Tegra::CommandList entries(params.num_entries); | ||
| 306 | 281 | ||
| 282 | Tegra::CommandList entries(params.num_entries); | ||
| 307 | if (kickoff) { | 283 | if (kickoff) { |
| 308 | system.ApplicationMemory().ReadBlock(params.address, entries.command_lists.data(), | 284 | system.ApplicationMemory().ReadBlock(params.address, entries.command_lists.data(), |
| 309 | params.num_entries * sizeof(Tegra::CommandListHeader)); | 285 | params.num_entries * sizeof(Tegra::CommandListHeader)); |
| 310 | } else { | 286 | } else { |
| 311 | std::memcpy(entries.command_lists.data(), &input[sizeof(IoctlSubmitGpfifo)], | 287 | std::memcpy(entries.command_lists.data(), commands.data(), |
| 312 | params.num_entries * sizeof(Tegra::CommandListHeader)); | 288 | params.num_entries * sizeof(Tegra::CommandListHeader)); |
| 313 | } | 289 | } |
| 314 | 290 | ||
| 315 | return SubmitGPFIFOImpl(params, output, std::move(entries)); | 291 | return SubmitGPFIFOImpl(params, std::move(entries)); |
| 316 | } | 292 | } |
| 317 | 293 | ||
| 318 | NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline, | 294 | NvResult nvhost_gpu::SubmitGPFIFOBase2(IoctlSubmitGpfifo& params, |
| 319 | std::span<u8> output) { | 295 | std::span<const Tegra::CommandListHeader> commands) { |
| 320 | if (input.size() < sizeof(IoctlSubmitGpfifo)) { | 296 | if (params.num_entries > commands.size()) { |
| 321 | UNIMPLEMENTED(); | 297 | UNIMPLEMENTED(); |
| 322 | return NvResult::InvalidSize; | 298 | return NvResult::InvalidSize; |
| 323 | } | 299 | } |
| 324 | IoctlSubmitGpfifo params{}; | 300 | |
| 325 | std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo)); | ||
| 326 | Tegra::CommandList entries(params.num_entries); | 301 | Tegra::CommandList entries(params.num_entries); |
| 327 | std::memcpy(entries.command_lists.data(), input_inline.data(), input_inline.size()); | 302 | std::memcpy(entries.command_lists.data(), commands.data(), |
| 328 | return SubmitGPFIFOImpl(params, output, std::move(entries)); | 303 | params.num_entries * sizeof(Tegra::CommandListHeader)); |
| 304 | return SubmitGPFIFOImpl(params, std::move(entries)); | ||
| 329 | } | 305 | } |
| 330 | 306 | ||
| 331 | NvResult nvhost_gpu::GetWaitbase(std::span<const u8> input, std::span<u8> output) { | 307 | NvResult nvhost_gpu::GetWaitbase(IoctlGetWaitbase& params) { |
| 332 | IoctlGetWaitbase params{}; | ||
| 333 | std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase)); | ||
| 334 | LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown); | 308 | LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown); |
| 335 | 309 | ||
| 336 | params.value = 0; // Seems to be hard-coded to 0 | 310 | params.value = 0; // Seems to be hard-coded to 0
| 337 | std::memcpy(output.data(), &params, output.size()); | ||
| 338 | return NvResult::Success; | 311 | return NvResult::Success; |
| 339 | } | 312 | } |
| 340 | 313 | ||
| 341 | NvResult nvhost_gpu::ChannelSetTimeout(std::span<const u8> input, std::span<u8> output) { | 314 | NvResult nvhost_gpu::ChannelSetTimeout(IoctlChannelSetTimeout& params) { |
| 342 | IoctlChannelSetTimeout params{}; | ||
| 343 | std::memcpy(&params, input.data(), sizeof(IoctlChannelSetTimeout)); | ||
| 344 | LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout); | 315 | LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout); |
| 345 | 316 | ||
| 346 | return NvResult::Success; | 317 | return NvResult::Success; |
| 347 | } | 318 | } |
| 348 | 319 | ||
| 349 | NvResult nvhost_gpu::ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output) { | 320 | NvResult nvhost_gpu::ChannelSetTimeslice(IoctlSetTimeslice& params) { |
| 350 | IoctlSetTimeslice params{}; | ||
| 351 | std::memcpy(&params, input.data(), sizeof(IoctlSetTimeslice)); | ||
| 352 | LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice); | 321 | LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice); |
| 353 | 322 | ||
| 354 | channel_timeslice = params.timeslice; | 323 | channel_timeslice = params.timeslice; |
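The change above repeats across every nvdrv device in this commit: handlers that took raw byte spans and memcpy'd them into a local struct (and back into the output buffer) now take the typed struct directly, with the byte shuffling hoisted into one wrapper. A minimal sketch of the wrapper shape these call sites imply, assuming a fixed-size parameter struct; the real helper lives in the new ioctl_serialization.h and may differ in detail:

    #include <cstring>
    #include <span>

    template <typename Self, typename Params>
    NvResult WrapFixed(Self* self, NvResult (Self::*callable)(Params&),
                       std::span<const u8> input, std::span<u8> output) {
        Params params{};
        if (input.size() < sizeof(Params)) {
            return NvResult::InvalidSize; // assumption: reject short inputs
        }
        std::memcpy(&params, input.data(), sizeof(Params));
        const NvResult result = (self->*callable)(params);
        if (output.size() >= sizeof(Params)) {
            std::memcpy(output.data(), &params, sizeof(Params)); // write results back
        }
        return result;
    }

Centralizing the copies also centralizes bounds checking: several of the removed handlers copied input.size() bytes into a fixed-size struct, which a single checked wrapper cannot get wrong per call site.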
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 529c20526..88fd228ff 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h | |||
| @@ -186,23 +186,24 @@ private: | |||
| 186 | u32_le channel_priority{}; | 186 | u32_le channel_priority{}; |
| 187 | u32_le channel_timeslice{}; | 187 | u32_le channel_timeslice{}; |
| 188 | 188 | ||
| 189 | NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output); | 189 | NvResult SetNVMAPfd(IoctlSetNvmapFD& params); |
| 190 | NvResult SetClientData(std::span<const u8> input, std::span<u8> output); | 190 | NvResult SetClientData(IoctlClientData& params); |
| 191 | NvResult GetClientData(std::span<const u8> input, std::span<u8> output); | 191 | NvResult GetClientData(IoctlClientData& params); |
| 192 | NvResult ZCullBind(std::span<const u8> input, std::span<u8> output); | 192 | NvResult ZCullBind(IoctlZCullBind& params); |
| 193 | NvResult SetErrorNotifier(std::span<const u8> input, std::span<u8> output); | 193 | NvResult SetErrorNotifier(IoctlSetErrorNotifier& params); |
| 194 | NvResult SetChannelPriority(std::span<const u8> input, std::span<u8> output); | 194 | NvResult SetChannelPriority(IoctlChannelSetPriority& params); |
| 195 | NvResult AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output); | 195 | NvResult AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params); |
| 196 | NvResult AllocateObjectContext(std::span<const u8> input, std::span<u8> output); | 196 | NvResult AllocateObjectContext(IoctlAllocObjCtx& params); |
| 197 | NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output, | 197 | |
| 198 | Tegra::CommandList&& entries); | 198 | NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries); |
| 199 | NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output, | 199 | NvResult SubmitGPFIFOBase1(IoctlSubmitGpfifo& params, |
| 200 | bool kickoff = false); | 200 | std::span<Tegra::CommandListHeader> commands, bool kickoff = false); |
| 201 | NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline, | 201 | NvResult SubmitGPFIFOBase2(IoctlSubmitGpfifo& params, |
| 202 | std::span<u8> output); | 202 | std::span<const Tegra::CommandListHeader> commands); |
| 203 | NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output); | 203 | |
| 204 | NvResult ChannelSetTimeout(std::span<const u8> input, std::span<u8> output); | 204 | NvResult GetWaitbase(IoctlGetWaitbase& params); |
| 205 | NvResult ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output); | 205 | NvResult ChannelSetTimeout(IoctlChannelSetTimeout& params); |
| 206 | NvResult ChannelSetTimeslice(IoctlSetTimeslice& params); | ||
| 206 | 207 | ||
| 207 | EventInterface& events_interface; | 208 | EventInterface& events_interface; |
| 208 | NvCore::Container& core; | 209 | NvCore::Container& core; |
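Splitting the old SubmitGPFIFOBase overloads into Base1/Base2 makes the source of the GPFIFO entries explicit: Base1 serves the plain ioctl, where the entries either follow the fixed struct in-band or are read from guest memory when kickoff is set, while Base2 serves the inline-input path, where the entries arrive in a separate buffer. A hypothetical, driver-internal sketch of the two paths (buffer stands in for the decoded payload):

    std::vector<Tegra::CommandListHeader> buffer(params.num_entries);
    // ... fill `buffer` from the bytes following IoctlSubmitGpfifo ...

    SubmitGPFIFOBase1(params, buffer, /*kickoff=*/false); // in-band entries
    SubmitGPFIFOBase2(params, buffer);                    // inline-input entries

Both entry points now validate params.num_entries against the span they were handed instead of trusting the raw input length.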
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index a174442a6..f43914e1b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 7 | #include "core/core.h" | 7 | #include "core/core.h" |
| 8 | #include "core/hle/service/nvdrv/core/container.h" | 8 | #include "core/hle/service/nvdrv/core/container.h" |
| 9 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 9 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" | 10 | #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" |
| 10 | #include "video_core/renderer_base.h" | 11 | #include "video_core/renderer_base.h" |
| 11 | 12 | ||
| @@ -25,18 +26,18 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 25 | if (!host1x_file.fd_to_id.contains(fd)) { | 26 | if (!host1x_file.fd_to_id.contains(fd)) { |
| 26 | host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++; | 27 | host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++; |
| 27 | } | 28 | } |
| 28 | return Submit(fd, input, output); | 29 | return WrapFixedVariable(this, &nvhost_nvdec::Submit, input, output, fd); |
| 29 | } | 30 | } |
| 30 | case 0x2: | 31 | case 0x2: |
| 31 | return GetSyncpoint(input, output); | 32 | return WrapFixed(this, &nvhost_nvdec::GetSyncpoint, input, output); |
| 32 | case 0x3: | 33 | case 0x3: |
| 33 | return GetWaitbase(input, output); | 34 | return WrapFixed(this, &nvhost_nvdec::GetWaitbase, input, output); |
| 34 | case 0x7: | 35 | case 0x7: |
| 35 | return SetSubmitTimeout(input, output); | 36 | return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output); |
| 36 | case 0x9: | 37 | case 0x9: |
| 37 | return MapBuffer(input, output); | 38 | return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output); |
| 38 | case 0xa: | 39 | case 0xa: |
| 39 | return UnmapBuffer(input, output); | 40 | return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output); |
| 40 | default: | 41 | default: |
| 41 | break; | 42 | break; |
| 42 | } | 43 | } |
| @@ -44,7 +45,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 44 | case 'H': | 45 | case 'H': |
| 45 | switch (command.cmd) { | 46 | switch (command.cmd) { |
| 46 | case 0x1: | 47 | case 0x1: |
| 47 | return SetNVMAPfd(input); | 48 | return WrapFixed(this, &nvhost_nvdec::SetNVMAPfd, input, output); |
| 48 | default: | 49 | default: |
| 49 | break; | 50 | break; |
| 50 | } | 51 | } |
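For ioctls such as Submit, MapBuffer and UnmapBuffer, a variable-length region follows the fixed struct, and extra arguments (here the DeviceFD) are appended after input/output at the call site. A sketch of the variable-payload wrapper these call sites imply; again an assumption about its shape, not the upstream code:

    #include <algorithm>
    #include <cstring>
    #include <span>
    #include <vector>

    template <typename Self, typename Params, typename... Extra>
    NvResult WrapFixedVariable(Self* self,
                               NvResult (Self::*callable)(Params&, std::span<u8>, Extra...),
                               std::span<const u8> input, std::span<u8> output,
                               Extra... extra) {
        if (input.size() < sizeof(Params)) {
            return NvResult::InvalidSize; // assumption: reject short inputs
        }
        Params params{};
        std::memcpy(&params, input.data(), sizeof(Params));
        // Hand the trailing payload to the handler as mutable scratch space.
        std::vector<u8> data(input.begin() + sizeof(Params), input.end());
        const NvResult result = (self->*callable)(params, data, extra...);
        // Write the (possibly updated) struct and payload back out.
        std::memcpy(output.data(), &params, std::min(output.size(), sizeof(Params)));
        if (output.size() > sizeof(Params)) {
            std::memcpy(output.data() + sizeof(Params), data.data(),
                        std::min(output.size() - sizeof(Params), data.size()));
        }
        return result;
    }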
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 61649aa4a..74c701b95 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | |||
| @@ -29,6 +29,9 @@ std::size_t SliceVectors(std::span<const u8> input, std::vector<T>& dst, std::si | |||
| 29 | return 0; | 29 | return 0; |
| 30 | } | 30 | } |
| 31 | const size_t bytes_copied = count * sizeof(T); | 31 | const size_t bytes_copied = count * sizeof(T); |
| 32 | if (input.size() < offset + bytes_copied) { | ||
| 33 | return 0; | ||
| 34 | } | ||
| 32 | std::memcpy(dst.data(), input.data() + offset, bytes_copied); | 35 | std::memcpy(dst.data(), input.data() + offset, bytes_copied); |
| 33 | return bytes_copied; | 36 | return bytes_copied; |
| 34 | } | 37 | } |
| @@ -41,6 +44,9 @@ std::size_t WriteVectors(std::span<u8> dst, const std::vector<T>& src, std::size | |||
| 41 | return 0; | 44 | return 0; |
| 42 | } | 45 | } |
| 43 | const size_t bytes_copied = src.size() * sizeof(T); | 46 | const size_t bytes_copied = src.size() * sizeof(T); |
| 47 | if (dst.size() < offset + bytes_copied) { | ||
| 48 | return 0; | ||
| 49 | } | ||
| 44 | std::memcpy(dst.data() + offset, src.data(), bytes_copied); | 50 | std::memcpy(dst.data() + offset, src.data(), bytes_copied); |
| 45 | return bytes_copied; | 51 | return bytes_copied; |
| 46 | } | 52 | } |
| @@ -63,18 +69,14 @@ nvhost_nvdec_common::~nvhost_nvdec_common() { | |||
| 63 | core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint); | 69 | core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint); |
| 64 | } | 70 | } |
| 65 | 71 | ||
| 66 | NvResult nvhost_nvdec_common::SetNVMAPfd(std::span<const u8> input) { | 72 | NvResult nvhost_nvdec_common::SetNVMAPfd(IoctlSetNvmapFD& params) { |
| 67 | IoctlSetNvmapFD params{}; | ||
| 68 | std::memcpy(¶ms, input.data(), sizeof(IoctlSetNvmapFD)); | ||
| 69 | LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd); | 73 | LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd); |
| 70 | 74 | ||
| 71 | nvmap_fd = params.nvmap_fd; | 75 | nvmap_fd = params.nvmap_fd; |
| 72 | return NvResult::Success; | 76 | return NvResult::Success; |
| 73 | } | 77 | } |
| 74 | 78 | ||
| 75 | NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output) { | 79 | NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, DeviceFD fd) { |
| 76 | IoctlSubmit params{}; | ||
| 77 | std::memcpy(&params, input.data(), sizeof(IoctlSubmit)); | ||
| 78 | LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count); | 80 | LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count); |
| 79 | 81 | ||
| 80 | // Instantiate param buffers | 82 | // Instantiate param buffers |
| @@ -85,12 +87,12 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std | |||
| 85 | std::vector<u32> fence_thresholds(params.fence_count); | 87 | std::vector<u32> fence_thresholds(params.fence_count); |
| 86 | 88 | ||
| 87 | // Slice input into their respective buffers | 89 | // Slice input into their respective buffers |
| 88 | std::size_t offset = sizeof(IoctlSubmit); | 90 | std::size_t offset = 0; |
| 89 | offset += SliceVectors(input, command_buffers, params.cmd_buffer_count, offset); | 91 | offset += SliceVectors(data, command_buffers, params.cmd_buffer_count, offset); |
| 90 | offset += SliceVectors(input, relocs, params.relocation_count, offset); | 92 | offset += SliceVectors(data, relocs, params.relocation_count, offset); |
| 91 | offset += SliceVectors(input, reloc_shifts, params.relocation_count, offset); | 93 | offset += SliceVectors(data, reloc_shifts, params.relocation_count, offset); |
| 92 | offset += SliceVectors(input, syncpt_increments, params.syncpoint_count, offset); | 94 | offset += SliceVectors(data, syncpt_increments, params.syncpoint_count, offset); |
| 93 | offset += SliceVectors(input, fence_thresholds, params.fence_count, offset); | 95 | offset += SliceVectors(data, fence_thresholds, params.fence_count, offset); |
| 94 | 96 | ||
| 95 | auto& gpu = system.GPU(); | 97 | auto& gpu = system.GPU(); |
| 96 | if (gpu.UseNvdec()) { | 98 | if (gpu.UseNvdec()) { |
| @@ -108,72 +110,51 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std | |||
| 108 | cmdlist.size() * sizeof(u32)); | 110 | cmdlist.size() * sizeof(u32)); |
| 109 | gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); | 111 | gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); |
| 110 | } | 112 | } |
| 111 | std::memcpy(output.data(), &params, sizeof(IoctlSubmit)); | ||
| 112 | // Some games expect command_buffers to be written back | 113 | // Some games expect command_buffers to be written back |
| 113 | offset = sizeof(IoctlSubmit); | 114 | offset = 0; |
| 114 | offset += WriteVectors(output, command_buffers, offset); | 115 | offset += WriteVectors(data, command_buffers, offset); |
| 115 | offset += WriteVectors(output, relocs, offset); | 116 | offset += WriteVectors(data, relocs, offset); |
| 116 | offset += WriteVectors(output, reloc_shifts, offset); | 117 | offset += WriteVectors(data, reloc_shifts, offset); |
| 117 | offset += WriteVectors(output, syncpt_increments, offset); | 118 | offset += WriteVectors(data, syncpt_increments, offset); |
| 118 | offset += WriteVectors(output, fence_thresholds, offset); | 119 | offset += WriteVectors(data, fence_thresholds, offset); |
| 119 | 120 | ||
| 120 | return NvResult::Success; | 121 | return NvResult::Success; |
| 121 | } | 122 | } |
| 122 | 123 | ||
| 123 | NvResult nvhost_nvdec_common::GetSyncpoint(std::span<const u8> input, std::span<u8> output) { | 124 | NvResult nvhost_nvdec_common::GetSyncpoint(IoctlGetSyncpoint& params) { |
| 124 | IoctlGetSyncpoint params{}; | ||
| 125 | std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint)); | ||
| 126 | LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param); | 125 | LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param); |
| 127 | |||
| 128 | // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]}; | ||
| 129 | params.value = channel_syncpoint; | 126 | params.value = channel_syncpoint; |
| 130 | std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint)); | ||
| 131 | |||
| 132 | return NvResult::Success; | 127 | return NvResult::Success; |
| 133 | } | 128 | } |
| 134 | 129 | ||
| 135 | NvResult nvhost_nvdec_common::GetWaitbase(std::span<const u8> input, std::span<u8> output) { | 130 | NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) { |
| 136 | IoctlGetWaitbase params{}; | ||
| 137 | LOG_CRITICAL(Service_NVDRV, "called WAITBASE"); | 131 | LOG_CRITICAL(Service_NVDRV, "called WAITBASE"); |
| 138 | std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase)); | ||
| 139 | params.value = 0; // Seems to be hard-coded to 0 | 132 | params.value = 0; // Seems to be hard-coded to 0
| 140 | std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase)); | ||
| 141 | return NvResult::Success; | 133 | return NvResult::Success; |
| 142 | } | 134 | } |
| 143 | 135 | ||
| 144 | NvResult nvhost_nvdec_common::MapBuffer(std::span<const u8> input, std::span<u8> output) { | 136 | NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) { |
| 145 | IoctlMapBuffer params{}; | 137 | const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size())); |
| 146 | std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer)); | 138 | for (size_t i = 0; i < num_entries; i++) {
| 147 | std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries); | 139 | entries[i].map_address = nvmap.PinHandle(entries[i].map_handle); |
| 148 | |||
| 149 | SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); | ||
| 150 | |||
| 151 | for (auto& cmd_buffer : cmd_buffer_handles) { | ||
| 152 | cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle); | ||
| 153 | } | 140 | } |
| 154 | std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer)); | ||
| 155 | std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(), | ||
| 156 | cmd_buffer_handles.size() * sizeof(MapBufferEntry)); | ||
| 157 | 141 | ||
| 158 | return NvResult::Success; | 142 | return NvResult::Success; |
| 159 | } | 143 | } |
| 160 | 144 | ||
| 161 | NvResult nvhost_nvdec_common::UnmapBuffer(std::span<const u8> input, std::span<u8> output) { | 145 | NvResult nvhost_nvdec_common::UnmapBuffer(IoctlMapBuffer& params, |
| 162 | IoctlMapBuffer params{}; | 146 | std::span<MapBufferEntry> entries) { |
| 163 | std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer)); | 147 | const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
| 164 | std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries); | 148 | for (size_t i = 0; i < num_entries; i++) { |
| 165 | 149 | nvmap.UnpinHandle(entries[i].map_handle); | |
| 166 | SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); | 150 | entries[i] = {}; |
| 167 | for (auto& cmd_buffer : cmd_buffer_handles) { | ||
| 168 | nvmap.UnpinHandle(cmd_buffer.map_handle); | ||
| 169 | } | 151 | } |
| 170 | 152 | ||
| 171 | std::memset(output.data(), 0, output.size()); | 153 | params = {}; |
| 172 | return NvResult::Success; | 154 | return NvResult::Success; |
| 173 | } | 155 | } |
| 174 | 156 | ||
| 175 | NvResult nvhost_nvdec_common::SetSubmitTimeout(std::span<const u8> input, std::span<u8> output) { | 157 | NvResult nvhost_nvdec_common::SetSubmitTimeout(u32 timeout) { |
| 176 | std::memcpy(&submit_timeout, input.data(), input.size()); | ||
| 177 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); | 158 | LOG_WARNING(Service_NVDRV, "(STUBBED) called"); |
| 178 | return NvResult::Success; | 159 | return NvResult::Success; |
| 179 | } | 160 | } |
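Submit's variable payload packs five arrays back to back after the fixed IoctlSubmit struct, which is why SliceVectors and WriteVectors walk it with a running offset. With the new size checks, a short payload now yields zero-filled vectors and a 0 return instead of an out-of-bounds read. A hypothetical walk-through (element type names are illustrative; the counts come from IoctlSubmit):

    // Payload layout, in order, each array sized by its count field:
    //   command buffers, relocations, relocation shifts,
    //   syncpoint increments, fence thresholds.
    std::vector<CommandBuffer> command_buffers(2); // cmd_buffer_count == 2
    std::vector<u32> fence_thresholds(1);          // fence_count == 1
    std::size_t offset = 0;
    offset += SliceVectors(data, command_buffers, 2, offset);  // consumes 2 * sizeof(CommandBuffer)
    offset += SliceVectors(data, fence_thresholds, 1, offset); // consumes 1 * sizeof(u32)
    // If data is too short at any step, the copy is skipped and the
    // destination keeps its value-initialized contents.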
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 9bb573bfe..7ce748e18 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h | |||
| @@ -107,13 +107,13 @@ protected: | |||
| 107 | static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size"); | 107 | static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size"); |
| 108 | 108 | ||
| 109 | /// Ioctl command implementations | 109 | /// Ioctl command implementations |
| 110 | NvResult SetNVMAPfd(std::span<const u8> input); | 110 | NvResult SetNVMAPfd(IoctlSetNvmapFD&); |
| 111 | NvResult Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output); | 111 | NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd); |
| 112 | NvResult GetSyncpoint(std::span<const u8> input, std::span<u8> output); | 112 | NvResult GetSyncpoint(IoctlGetSyncpoint& params); |
| 113 | NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output); | 113 | NvResult GetWaitbase(IoctlGetWaitbase& params); |
| 114 | NvResult MapBuffer(std::span<const u8> input, std::span<u8> output); | 114 | NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries); |
| 115 | NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output); | 115 | NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries); |
| 116 | NvResult SetSubmitTimeout(std::span<const u8> input, std::span<u8> output); | 116 | NvResult SetSubmitTimeout(u32 timeout); |
| 117 | 117 | ||
| 118 | Kernel::KEvent* QueryEvent(u32 event_id) override; | 118 | Kernel::KEvent* QueryEvent(u32 event_id) override; |
| 119 | 119 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp index a05c8cdae..9e6b86458 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include "common/assert.h" | 6 | #include "common/assert.h" |
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 8 | #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h" | 9 | #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h" |
| 9 | 10 | ||
| 10 | namespace Service::Nvidia::Devices { | 11 | namespace Service::Nvidia::Devices { |
| @@ -18,7 +19,7 @@ NvResult nvhost_nvjpg::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 18 | case 'H': | 19 | case 'H': |
| 19 | switch (command.cmd) { | 20 | switch (command.cmd) { |
| 20 | case 0x1: | 21 | case 0x1: |
| 21 | return SetNVMAPfd(input, output); | 22 | return WrapFixed(this, &nvhost_nvjpg::SetNVMAPfd, input, output); |
| 22 | default: | 23 | default: |
| 23 | break; | 24 | break; |
| 24 | } | 25 | } |
| @@ -46,9 +47,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in | |||
| 46 | void nvhost_nvjpg::OnOpen(DeviceFD fd) {} | 47 | void nvhost_nvjpg::OnOpen(DeviceFD fd) {} |
| 47 | void nvhost_nvjpg::OnClose(DeviceFD fd) {} | 48 | void nvhost_nvjpg::OnClose(DeviceFD fd) {} |
| 48 | 49 | ||
| 49 | NvResult nvhost_nvjpg::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) { | 50 | NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) { |
| 50 | IoctlSetNvmapFD params{}; | ||
| 51 | std::memcpy(&params, input.data(), input.size()); | ||
| 52 | LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd); | 51 | LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd); |
| 53 | 52 | ||
| 54 | nvmap_fd = params.nvmap_fd; | 53 | nvmap_fd = params.nvmap_fd; |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h index 5623e0d47..790c97f6a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h | |||
| @@ -33,7 +33,7 @@ private: | |||
| 33 | 33 | ||
| 34 | s32_le nvmap_fd{}; | 34 | s32_le nvmap_fd{}; |
| 35 | 35 | ||
| 36 | NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output); | 36 | NvResult SetNVMAPfd(IoctlSetNvmapFD& params); |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | } // namespace Service::Nvidia::Devices | 39 | } // namespace Service::Nvidia::Devices |
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index c0b8684c3..87f8d7c22 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include "common/logging/log.h" | 5 | #include "common/logging/log.h" |
| 6 | #include "core/core.h" | 6 | #include "core/core.h" |
| 7 | #include "core/hle/service/nvdrv/core/container.h" | 7 | #include "core/hle/service/nvdrv/core/container.h" |
| 8 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 8 | #include "core/hle/service/nvdrv/devices/nvhost_vic.h" | 9 | #include "core/hle/service/nvdrv/devices/nvhost_vic.h" |
| 9 | #include "video_core/renderer_base.h" | 10 | #include "video_core/renderer_base.h" |
| 10 | 11 | ||
| @@ -25,16 +26,16 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 25 | if (!host1x_file.fd_to_id.contains(fd)) { | 26 | if (!host1x_file.fd_to_id.contains(fd)) { |
| 26 | host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++; | 27 | host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++; |
| 27 | } | 28 | } |
| 28 | return Submit(fd, input, output); | 29 | return WrapFixedVariable(this, &nvhost_vic::Submit, input, output, fd); |
| 29 | } | 30 | } |
| 30 | case 0x2: | 31 | case 0x2: |
| 31 | return GetSyncpoint(input, output); | 32 | return WrapFixed(this, &nvhost_vic::GetSyncpoint, input, output); |
| 32 | case 0x3: | 33 | case 0x3: |
| 33 | return GetWaitbase(input, output); | 34 | return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output); |
| 34 | case 0x9: | 35 | case 0x9: |
| 35 | return MapBuffer(input, output); | 36 | return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output); |
| 36 | case 0xa: | 37 | case 0xa: |
| 37 | return UnmapBuffer(input, output); | 38 | return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output); |
| 38 | default: | 39 | default: |
| 39 | break; | 40 | break; |
| 40 | } | 41 | } |
| @@ -42,7 +43,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu | |||
| 42 | case 'H': | 43 | case 'H': |
| 43 | switch (command.cmd) { | 44 | switch (command.cmd) { |
| 44 | case 0x1: | 45 | case 0x1: |
| 45 | return SetNVMAPfd(input); | 46 | return WrapFixed(this, &nvhost_vic::SetNVMAPfd, input, output); |
| 46 | default: | 47 | default: |
| 47 | break; | 48 | break; |
| 48 | } | 49 | } |
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 968eaa175..71b2e62ec 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include "core/hle/kernel/k_process.h" | 13 | #include "core/hle/kernel/k_process.h" |
| 14 | #include "core/hle/service/nvdrv/core/container.h" | 14 | #include "core/hle/service/nvdrv/core/container.h" |
| 15 | #include "core/hle/service/nvdrv/core/nvmap.h" | 15 | #include "core/hle/service/nvdrv/core/nvmap.h" |
| 16 | #include "core/hle/service/nvdrv/devices/ioctl_serialization.h" | ||
| 16 | #include "core/hle/service/nvdrv/devices/nvmap.h" | 17 | #include "core/hle/service/nvdrv/devices/nvmap.h" |
| 17 | #include "core/memory.h" | 18 | #include "core/memory.h" |
| 18 | 19 | ||
| @@ -31,17 +32,17 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, | |||
| 31 | case 0x1: | 32 | case 0x1: |
| 32 | switch (command.cmd) { | 33 | switch (command.cmd) { |
| 33 | case 0x1: | 34 | case 0x1: |
| 34 | return IocCreate(input, output); | 35 | return WrapFixed(this, &nvmap::IocCreate, input, output); |
| 35 | case 0x3: | 36 | case 0x3: |
| 36 | return IocFromId(input, output); | 37 | return WrapFixed(this, &nvmap::IocFromId, input, output); |
| 37 | case 0x4: | 38 | case 0x4: |
| 38 | return IocAlloc(input, output); | 39 | return WrapFixed(this, &nvmap::IocAlloc, input, output); |
| 39 | case 0x5: | 40 | case 0x5: |
| 40 | return IocFree(input, output); | 41 | return WrapFixed(this, &nvmap::IocFree, input, output); |
| 41 | case 0x9: | 42 | case 0x9: |
| 42 | return IocParam(input, output); | 43 | return WrapFixed(this, &nvmap::IocParam, input, output); |
| 43 | case 0xe: | 44 | case 0xe: |
| 44 | return IocGetId(input, output); | 45 | return WrapFixed(this, &nvmap::IocGetId, input, output); |
| 45 | default: | 46 | default: |
| 46 | break; | 47 | break; |
| 47 | } | 48 | } |
| @@ -69,9 +70,7 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st | |||
| 69 | void nvmap::OnOpen(DeviceFD fd) {} | 70 | void nvmap::OnOpen(DeviceFD fd) {} |
| 70 | void nvmap::OnClose(DeviceFD fd) {} | 71 | void nvmap::OnClose(DeviceFD fd) {} |
| 71 | 72 | ||
| 72 | NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) { | 73 | NvResult nvmap::IocCreate(IocCreateParams& params) { |
| 73 | IocCreateParams params; | ||
| 74 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 75 | LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); | 74 | LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); |
| 76 | 75 | ||
| 77 | std::shared_ptr<NvCore::NvMap::Handle> handle_description{}; | 76 | std::shared_ptr<NvCore::NvMap::Handle> handle_description{}; |
| @@ -85,13 +84,10 @@ NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) { | |||
| 85 | params.handle = handle_description->id; | 84 | params.handle = handle_description->id; |
| 86 | LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size); | 85 | LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size); |
| 87 | 86 | ||
| 88 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 89 | return NvResult::Success; | 87 | return NvResult::Success; |
| 90 | } | 88 | } |
| 91 | 89 | ||
| 92 | NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) { | 90 | NvResult nvmap::IocAlloc(IocAllocParams& params) { |
| 93 | IocAllocParams params; | ||
| 94 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 95 | LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); | 91 | LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); |
| 96 | 92 | ||
| 97 | if (!params.handle) { | 93 | if (!params.handle) { |
| @@ -133,14 +129,10 @@ NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) { | |||
| 133 | handle_description->size, | 129 | handle_description->size, |
| 134 | Kernel::KMemoryPermission::None, true, false) | 130 | Kernel::KMemoryPermission::None, true, false) |
| 135 | .IsSuccess()); | 131 | .IsSuccess()); |
| 136 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 137 | return result; | 132 | return result; |
| 138 | } | 133 | } |
| 139 | 134 | ||
| 140 | NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) { | 135 | NvResult nvmap::IocGetId(IocGetIdParams& params) { |
| 141 | IocGetIdParams params; | ||
| 142 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 143 | |||
| 144 | LOG_DEBUG(Service_NVDRV, "called"); | 136 | LOG_DEBUG(Service_NVDRV, "called"); |
| 145 | 137 | ||
| 146 | // See the comment in FromId for extra info on this function | 138 | // See the comment in FromId for extra info on this function |
| @@ -157,14 +149,10 @@ NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) { | |||
| 157 | } | 149 | } |
| 158 | 150 | ||
| 159 | params.id = handle_description->id; | 151 | params.id = handle_description->id; |
| 160 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 161 | return NvResult::Success; | 152 | return NvResult::Success; |
| 162 | } | 153 | } |
| 163 | 154 | ||
| 164 | NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) { | 155 | NvResult nvmap::IocFromId(IocFromIdParams& params) { |
| 165 | IocFromIdParams params; | ||
| 166 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 167 | |||
| 168 | LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id); | 156 | LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id); |
| 169 | 157 | ||
| 170 | // Handles and IDs are always the same value in nvmap; however, IDs can be used globally given the | 158 | // Handles and IDs are always the same value in nvmap; however, IDs can be used globally given the
| @@ -188,16 +176,12 @@ NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) { | |||
| 188 | return result; | 176 | return result; |
| 189 | } | 177 | } |
| 190 | params.handle = handle_description->id; | 178 | params.handle = handle_description->id; |
| 191 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 192 | return NvResult::Success; | 179 | return NvResult::Success; |
| 193 | } | 180 | } |
| 194 | 181 | ||
| 195 | NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) { | 182 | NvResult nvmap::IocParam(IocParamParams& params) { |
| 196 | enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 }; | 183 | enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 }; |
| 197 | 184 | ||
| 198 | IocParamParams params; | ||
| 199 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 200 | |||
| 201 | LOG_DEBUG(Service_NVDRV, "called type={}", params.param); | 185 | LOG_DEBUG(Service_NVDRV, "called type={}", params.param); |
| 202 | 186 | ||
| 203 | if (!params.handle) { | 187 | if (!params.handle) { |
| @@ -237,14 +221,10 @@ NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) { | |||
| 237 | return NvResult::BadValue; | 221 | return NvResult::BadValue; |
| 238 | } | 222 | } |
| 239 | 223 | ||
| 240 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 241 | return NvResult::Success; | 224 | return NvResult::Success; |
| 242 | } | 225 | } |
| 243 | 226 | ||
| 244 | NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) { | 227 | NvResult nvmap::IocFree(IocFreeParams& params) { |
| 245 | IocFreeParams params; | ||
| 246 | std::memcpy(&params, input.data(), sizeof(params)); | ||
| 247 | |||
| 248 | LOG_DEBUG(Service_NVDRV, "called"); | 228 | LOG_DEBUG(Service_NVDRV, "called"); |
| 249 | 229 | ||
| 250 | if (!params.handle) { | 230 | if (!params.handle) { |
| @@ -267,7 +247,6 @@ NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) { | |||
| 267 | // This is possible when there's internal dups or other duplicates. | 247 | // This is possible when there's internal dups or other duplicates. |
| 268 | } | 248 | } |
| 269 | 249 | ||
| 270 | std::memcpy(output.data(), &params, sizeof(params)); | ||
| 271 | return NvResult::Success; | 250 | return NvResult::Success; |
| 272 | } | 251 | } |
| 273 | 252 | ||
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index 4c0cc71cd..049c11028 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h | |||
| @@ -99,12 +99,12 @@ public: | |||
| 99 | }; | 99 | }; |
| 100 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); | 100 | static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size"); |
| 101 | 101 | ||
| 102 | NvResult IocCreate(std::span<const u8> input, std::span<u8> output); | 102 | NvResult IocCreate(IocCreateParams& params); |
| 103 | NvResult IocAlloc(std::span<const u8> input, std::span<u8> output); | 103 | NvResult IocAlloc(IocAllocParams& params); |
| 104 | NvResult IocGetId(std::span<const u8> input, std::span<u8> output); | 104 | NvResult IocGetId(IocGetIdParams& params); |
| 105 | NvResult IocFromId(std::span<const u8> input, std::span<u8> output); | 105 | NvResult IocFromId(IocFromIdParams& params); |
| 106 | NvResult IocParam(std::span<const u8> input, std::span<u8> output); | 106 | NvResult IocParam(IocParamParams& params); |
| 107 | NvResult IocFree(std::span<const u8> input, std::span<u8> output); | 107 | NvResult IocFree(IocFreeParams& params); |
| 108 | 108 | ||
| 109 | private: | 109 | private: |
| 110 | /// Id to use for the next handle that is created. | 110 | /// Id to use for the next handle that is created. |
diff --git a/src/core/hle/service/nvnflinger/buffer_transform_flags.h b/src/core/hle/service/nvnflinger/buffer_transform_flags.h index 67aa5dad6..ffe579718 100644 --- a/src/core/hle/service/nvnflinger/buffer_transform_flags.h +++ b/src/core/hle/service/nvnflinger/buffer_transform_flags.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include "common/common_funcs.h" | ||
| 6 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 7 | 8 | ||
| 8 | namespace Service::android { | 9 | namespace Service::android { |
| @@ -21,5 +22,6 @@ enum class BufferTransformFlags : u32 { | |||
| 21 | /// Rotate source image 270 degrees clockwise | 22 | /// Rotate source image 270 degrees clockwise |
| 22 | Rotate270 = 0x07, | 23 | Rotate270 = 0x07, |
| 23 | }; | 24 | }; |
| 25 | DECLARE_ENUM_FLAG_OPERATORS(BufferTransformFlags); | ||
| 24 | 26 | ||
| 25 | } // namespace Service::android | 27 | } // namespace Service::android |
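DECLARE_ENUM_FLAG_OPERATORS is what lets the reworked vk_blit_screen code further down treat BufferTransformFlags as a bitmask (flags & FlipH, flags &= ~FlipV, True(flags)) even though it is a scoped enum. Roughly what the macro generates; a sketch, with the real definition in common/common_funcs.h:

    #include <type_traits>

    [[nodiscard]] constexpr BufferTransformFlags operator&(BufferTransformFlags a,
                                                           BufferTransformFlags b) {
        using T = std::underlying_type_t<BufferTransformFlags>;
        return static_cast<BufferTransformFlags>(static_cast<T>(a) & static_cast<T>(b));
    }
    [[nodiscard]] constexpr BufferTransformFlags operator~(BufferTransformFlags f) {
        using T = std::underlying_type_t<BufferTransformFlags>;
        return static_cast<BufferTransformFlags>(~static_cast<T>(f));
    }
    // ...plus |, ^ and the compound assignments; True(x) is the companion
    // helper seen below, presumably testing the underlying value against 0.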
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp index 2e29bc848..6dc327b8b 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp | |||
| @@ -71,24 +71,17 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address, | |||
| 71 | R_SUCCEED(); | 71 | R_SUCCEED(); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | template <typename T> | ||
| 75 | std::span<u8> SerializeIoc(T& params) { | ||
| 76 | return std::span(reinterpret_cast<u8*>(std::addressof(params)), sizeof(T)); | ||
| 77 | } | ||
| 78 | |||
| 79 | Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, u32 size) { | 74 | Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, u32 size) { |
| 80 | // Create a handle. | 75 | // Create a handle. |
| 81 | Nvidia::Devices::nvmap::IocCreateParams create_in_params{ | 76 | Nvidia::Devices::nvmap::IocCreateParams create_params{ |
| 82 | .size = size, | 77 | .size = size, |
| 83 | .handle = 0, | 78 | .handle = 0, |
| 84 | }; | 79 | }; |
| 85 | Nvidia::Devices::nvmap::IocCreateParams create_out_params{}; | 80 | R_UNLESS(nvmap.IocCreate(create_params) == Nvidia::NvResult::Success, |
| 86 | R_UNLESS(nvmap.IocCreate(SerializeIoc(create_in_params), SerializeIoc(create_out_params)) == | ||
| 87 | Nvidia::NvResult::Success, | ||
| 88 | VI::ResultOperationFailed); | 81 | VI::ResultOperationFailed); |
| 89 | 82 | ||
| 90 | // Assign the output handle. | 83 | // Assign the output handle. |
| 91 | *out_nv_map_handle = create_out_params.handle; | 84 | *out_nv_map_handle = create_params.handle; |
| 92 | 85 | ||
| 93 | // We succeeded. | 86 | // We succeeded. |
| 94 | R_SUCCEED(); | 87 | R_SUCCEED(); |
| @@ -96,13 +89,10 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, | |||
| 96 | 89 | ||
| 97 | Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) { | 90 | Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) { |
| 98 | // Free the handle. | 91 | // Free the handle. |
| 99 | Nvidia::Devices::nvmap::IocFreeParams free_in_params{ | 92 | Nvidia::Devices::nvmap::IocFreeParams free_params{ |
| 100 | .handle = handle, | 93 | .handle = handle, |
| 101 | }; | 94 | }; |
| 102 | Nvidia::Devices::nvmap::IocFreeParams free_out_params{}; | 95 | R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed); |
| 103 | R_UNLESS(nvmap.IocFree(SerializeIoc(free_in_params), SerializeIoc(free_out_params)) == | ||
| 104 | Nvidia::NvResult::Success, | ||
| 105 | VI::ResultOperationFailed); | ||
| 106 | 96 | ||
| 107 | // We succeeded. | 97 | // We succeeded. |
| 108 | R_SUCCEED(); | 98 | R_SUCCEED(); |
| @@ -111,7 +101,7 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) { | |||
| 111 | Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer, | 101 | Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer, |
| 112 | u32 size) { | 102 | u32 size) { |
| 113 | // Assign the allocated memory to the handle. | 103 | // Assign the allocated memory to the handle. |
| 114 | Nvidia::Devices::nvmap::IocAllocParams alloc_in_params{ | 104 | Nvidia::Devices::nvmap::IocAllocParams alloc_params{ |
| 115 | .handle = handle, | 105 | .handle = handle, |
| 116 | .heap_mask = 0, | 106 | .heap_mask = 0, |
| 117 | .flags = {}, | 107 | .flags = {}, |
| @@ -119,10 +109,7 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce | |||
| 119 | .kind = 0, | 109 | .kind = 0, |
| 120 | .address = GetInteger(buffer), | 110 | .address = GetInteger(buffer), |
| 121 | }; | 111 | }; |
| 122 | Nvidia::Devices::nvmap::IocAllocParams alloc_out_params{}; | 112 | R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed); |
| 123 | R_UNLESS(nvmap.IocAlloc(SerializeIoc(alloc_in_params), SerializeIoc(alloc_out_params)) == | ||
| 124 | Nvidia::NvResult::Success, | ||
| 125 | VI::ResultOperationFailed); | ||
| 126 | 113 | ||
| 127 | // We succeeded. | 114 | // We succeeded. |
| 128 | R_SUCCEED(); | 115 | R_SUCCEED(); |
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index fa5273402..84b60a928 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -41,7 +41,7 @@ struct Memory::Impl { | |||
| 41 | explicit Impl(Core::System& system_) : system{system_} {} | 41 | explicit Impl(Core::System& system_) : system{system_} {} |
| 42 | 42 | ||
| 43 | void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) { | 43 | void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) { |
| 44 | current_page_table = &process.GetPageTable().PageTableImpl(); | 44 | current_page_table = &process.GetPageTable().GetImpl(); |
| 45 | current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer(); | 45 | current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer(); |
| 46 | 46 | ||
| 47 | const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth(); | 47 | const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth(); |
| @@ -195,7 +195,7 @@ struct Memory::Impl { | |||
| 195 | 195 | ||
| 196 | bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped, | 196 | bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped, |
| 197 | auto on_memory, auto on_rasterizer, auto increment) { | 197 | auto on_memory, auto on_rasterizer, auto increment) { |
| 198 | const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl(); | 198 | const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl(); |
| 199 | std::size_t remaining_size = size; | 199 | std::size_t remaining_size = size; |
| 200 | std::size_t page_index = addr >> YUZU_PAGEBITS; | 200 | std::size_t page_index = addr >> YUZU_PAGEBITS; |
| 201 | std::size_t page_offset = addr & YUZU_PAGEMASK; | 201 | std::size_t page_offset = addr & YUZU_PAGEMASK; |
| @@ -826,7 +826,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress b | |||
| 826 | 826 | ||
| 827 | bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { | 827 | bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { |
| 828 | const Kernel::KProcess& process = *system.ApplicationProcess(); | 828 | const Kernel::KProcess& process = *system.ApplicationProcess(); |
| 829 | const auto& page_table = process.GetPageTable().PageTableImpl(); | 829 | const auto& page_table = process.GetPageTable().GetImpl(); |
| 830 | const size_t page = vaddr >> YUZU_PAGEBITS; | 830 | const size_t page = vaddr >> YUZU_PAGEBITS; |
| 831 | if (page >= page_table.pointers.size()) { | 831 | if (page >= page_table.pointers.size()) { |
| 832 | return false; | 832 | return false; |
diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp index 65cd5aa06..4f1d5b548 100644 --- a/src/video_core/renderer_null/null_rasterizer.cpp +++ b/src/video_core/renderer_null/null_rasterizer.cpp | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include "common/alignment.h" | 4 | #include "common/alignment.h" |
| 5 | #include "core/memory.h" | 5 | #include "core/memory.h" |
| 6 | #include "video_core/control/channel_state.h" | ||
| 6 | #include "video_core/host1x/host1x.h" | 7 | #include "video_core/host1x/host1x.h" |
| 7 | #include "video_core/memory_manager.h" | 8 | #include "video_core/memory_manager.h" |
| 8 | #include "video_core/renderer_null/null_rasterizer.h" | 9 | #include "video_core/renderer_null/null_rasterizer.h" |
| @@ -99,8 +100,14 @@ bool RasterizerNull::AccelerateDisplay(const Tegra::FramebufferConfig& config, | |||
| 99 | } | 100 | } |
| 100 | void RasterizerNull::LoadDiskResources(u64 title_id, std::stop_token stop_loading, | 101 | void RasterizerNull::LoadDiskResources(u64 title_id, std::stop_token stop_loading, |
| 101 | const VideoCore::DiskResourceLoadCallback& callback) {} | 102 | const VideoCore::DiskResourceLoadCallback& callback) {} |
| 102 | void RasterizerNull::InitializeChannel(Tegra::Control::ChannelState& channel) {} | 103 | void RasterizerNull::InitializeChannel(Tegra::Control::ChannelState& channel) { |
| 103 | void RasterizerNull::BindChannel(Tegra::Control::ChannelState& channel) {} | 104 | CreateChannel(channel); |
| 104 | void RasterizerNull::ReleaseChannel(s32 channel_id) {} | 105 | } |
| 106 | void RasterizerNull::BindChannel(Tegra::Control::ChannelState& channel) { | ||
| 107 | BindToChannel(channel.bind_id); | ||
| 108 | } | ||
| 109 | void RasterizerNull::ReleaseChannel(s32 channel_id) { | ||
| 110 | EraseChannel(channel_id); | ||
| 111 | } | ||
| 105 | 112 | ||
| 106 | } // namespace Null | 113 | } // namespace Null |
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 52fc142d1..66483a900 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp | |||
| @@ -137,6 +137,56 @@ BlitScreen::BlitScreen(Core::Memory::Memory& cpu_memory_, Core::Frontend::EmuWin | |||
| 137 | 137 | ||
| 138 | BlitScreen::~BlitScreen() = default; | 138 | BlitScreen::~BlitScreen() = default; |
| 139 | 139 | ||
| 140 | static Common::Rectangle<f32> NormalizeCrop(const Tegra::FramebufferConfig& framebuffer, | ||
| 141 | const ScreenInfo& screen_info) { | ||
| 142 | f32 left, top, right, bottom; | ||
| 143 | |||
| 144 | if (!framebuffer.crop_rect.IsEmpty()) { | ||
| 145 | // If crop rectangle is not empty, apply properties from rectangle. | ||
| 146 | left = static_cast<f32>(framebuffer.crop_rect.left); | ||
| 147 | top = static_cast<f32>(framebuffer.crop_rect.top); | ||
| 148 | right = static_cast<f32>(framebuffer.crop_rect.right); | ||
| 149 | bottom = static_cast<f32>(framebuffer.crop_rect.bottom); | ||
| 150 | } else { | ||
| 151 | // Otherwise, fall back to framebuffer dimensions. | ||
| 152 | left = 0; | ||
| 153 | top = 0; | ||
| 154 | right = static_cast<f32>(framebuffer.width); | ||
| 155 | bottom = static_cast<f32>(framebuffer.height); | ||
| 156 | } | ||
| 157 | |||
| 158 | // Apply transformation flags. | ||
| 159 | auto framebuffer_transform_flags = framebuffer.transform_flags; | ||
| 160 | |||
| 161 | if (True(framebuffer_transform_flags & Service::android::BufferTransformFlags::FlipH)) { | ||
| 162 | // Switch left and right. | ||
| 163 | std::swap(left, right); | ||
| 164 | } | ||
| 165 | if (True(framebuffer_transform_flags & Service::android::BufferTransformFlags::FlipV)) { | ||
| 166 | // Switch top and bottom. | ||
| 167 | std::swap(top, bottom); | ||
| 168 | } | ||
| 169 | |||
| 170 | framebuffer_transform_flags &= ~Service::android::BufferTransformFlags::FlipH; | ||
| 171 | framebuffer_transform_flags &= ~Service::android::BufferTransformFlags::FlipV; | ||
| 172 | if (True(framebuffer_transform_flags)) { | ||
| 173 | UNIMPLEMENTED_MSG("Unsupported framebuffer_transform_flags={}", | ||
| 174 | static_cast<u32>(framebuffer_transform_flags)); | ||
| 175 | } | ||
| 176 | |||
| 177 | // Get the screen properties. | ||
| 178 | const f32 screen_width = static_cast<f32>(screen_info.width); | ||
| 179 | const f32 screen_height = static_cast<f32>(screen_info.height); | ||
| 180 | |||
| 181 | // Normalize coordinate space. | ||
| 182 | left /= screen_width; | ||
| 183 | top /= screen_height; | ||
| 184 | right /= screen_width; | ||
| 185 | bottom /= screen_height; | ||
| 186 | |||
| 187 | return Common::Rectangle<f32>(left, top, right, bottom); | ||
| 188 | } | ||
| 189 | |||
| 140 | void BlitScreen::Recreate() { | 190 | void BlitScreen::Recreate() { |
| 141 | present_manager.WaitPresent(); | 191 | present_manager.WaitPresent(); |
| 142 | scheduler.Finish(); | 192 | scheduler.Finish(); |
| @@ -354,17 +404,10 @@ void BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, | |||
| 354 | source_image_view = smaa->Draw(scheduler, image_index, source_image, source_image_view); | 404 | source_image_view = smaa->Draw(scheduler, image_index, source_image, source_image_view); |
| 355 | } | 405 | } |
| 356 | if (fsr) { | 406 | if (fsr) { |
| 357 | auto crop_rect = framebuffer.crop_rect; | 407 | const auto crop_rect = NormalizeCrop(framebuffer, screen_info); |
| 358 | if (crop_rect.GetWidth() == 0) { | 408 | const VkExtent2D fsr_input_size{ |
| 359 | crop_rect.right = framebuffer.width; | 409 | .width = Settings::values.resolution_info.ScaleUp(screen_info.width), |
| 360 | } | 410 | .height = Settings::values.resolution_info.ScaleUp(screen_info.height), |
| 361 | if (crop_rect.GetHeight() == 0) { | ||
| 362 | crop_rect.bottom = framebuffer.height; | ||
| 363 | } | ||
| 364 | crop_rect = crop_rect.Scale(Settings::values.resolution_info.up_factor); | ||
| 365 | VkExtent2D fsr_input_size{ | ||
| 366 | .width = Settings::values.resolution_info.ScaleUp(framebuffer.width), | ||
| 367 | .height = Settings::values.resolution_info.ScaleUp(framebuffer.height), | ||
| 368 | }; | 411 | }; |
| 369 | VkImageView fsr_image_view = | 412 | VkImageView fsr_image_view = |
| 370 | fsr->Draw(scheduler, image_index, source_image_view, fsr_input_size, crop_rect); | 413 | fsr->Draw(scheduler, image_index, source_image_view, fsr_input_size, crop_rect); |
| @@ -1397,61 +1440,37 @@ void BlitScreen::SetUniformData(BufferData& data, const Layout::FramebufferLayou | |||
| 1397 | 1440 | ||
| 1398 | void BlitScreen::SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer, | 1441 | void BlitScreen::SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer, |
| 1399 | const Layout::FramebufferLayout layout) const { | 1442 | const Layout::FramebufferLayout layout) const { |
| 1400 | const auto& framebuffer_transform_flags = framebuffer.transform_flags; | 1443 | f32 left, top, right, bottom; |
| 1401 | const auto& framebuffer_crop_rect = framebuffer.crop_rect; | ||
| 1402 | |||
| 1403 | static constexpr Common::Rectangle<f32> texcoords{0.f, 0.f, 1.f, 1.f}; | ||
| 1404 | auto left = texcoords.left; | ||
| 1405 | auto right = texcoords.right; | ||
| 1406 | |||
| 1407 | switch (framebuffer_transform_flags) { | ||
| 1408 | case Service::android::BufferTransformFlags::Unset: | ||
| 1409 | break; | ||
| 1410 | case Service::android::BufferTransformFlags::FlipV: | ||
| 1411 | // Flip the framebuffer vertically | ||
| 1412 | left = texcoords.right; | ||
| 1413 | right = texcoords.left; | ||
| 1414 | break; | ||
| 1415 | default: | ||
| 1416 | UNIMPLEMENTED_MSG("Unsupported framebuffer_transform_flags={}", | ||
| 1417 | static_cast<u32>(framebuffer_transform_flags)); | ||
| 1418 | break; | ||
| 1419 | } | ||
| 1420 | 1444 | ||
| 1421 | UNIMPLEMENTED_IF(framebuffer_crop_rect.left != 0); | 1445 | if (fsr) { |
| 1422 | 1446 | // FSR has already applied the crop, so we just want to render the image | |
| 1423 | f32 left_start{}; | 1447 | // it has produced. |
| 1424 | if (framebuffer_crop_rect.Top() > 0) { | 1448 | left = 0; |
| 1425 | left_start = static_cast<f32>(framebuffer_crop_rect.Top()) / | 1449 | top = 0; |
| 1426 | static_cast<f32>(framebuffer_crop_rect.Bottom()); | 1450 | right = 1; |
| 1427 | } | 1451 | bottom = 1; |
| 1428 | f32 scale_u = static_cast<f32>(framebuffer.width) / static_cast<f32>(screen_info.width); | 1452 | } else { |
| 1429 | f32 scale_v = static_cast<f32>(framebuffer.height) / static_cast<f32>(screen_info.height); | 1453 | // Get the normalized crop rectangle. |
| 1430 | // Scale the output by the crop width/height. This is commonly used with 1280x720 rendering | 1454 | const auto crop = NormalizeCrop(framebuffer, screen_info); |
| 1431 | // (e.g. handheld mode) on a 1920x1080 framebuffer. | 1455 | |
| 1432 | if (!fsr) { | 1456 | // Apply the crop. |
| 1433 | if (framebuffer_crop_rect.GetWidth() > 0) { | 1457 | left = crop.left; |
| 1434 | scale_u = static_cast<f32>(framebuffer_crop_rect.GetWidth()) / | 1458 | top = crop.top; |
| 1435 | static_cast<f32>(screen_info.width); | 1459 | right = crop.right; |
| 1436 | } | 1460 | bottom = crop.bottom; |
| 1437 | if (framebuffer_crop_rect.GetHeight() > 0) { | ||
| 1438 | scale_v = static_cast<f32>(framebuffer_crop_rect.GetHeight()) / | ||
| 1439 | static_cast<f32>(screen_info.height); | ||
| 1440 | } | ||
| 1441 | } | 1461 | } |
| 1442 | 1462 | ||
| 1463 | // Map the coordinates to the screen. | ||
| 1443 | const auto& screen = layout.screen; | 1464 | const auto& screen = layout.screen; |
| 1444 | const auto x = static_cast<f32>(screen.left); | 1465 | const auto x = static_cast<f32>(screen.left); |
| 1445 | const auto y = static_cast<f32>(screen.top); | 1466 | const auto y = static_cast<f32>(screen.top); |
| 1446 | const auto w = static_cast<f32>(screen.GetWidth()); | 1467 | const auto w = static_cast<f32>(screen.GetWidth()); |
| 1447 | const auto h = static_cast<f32>(screen.GetHeight()); | 1468 | const auto h = static_cast<f32>(screen.GetHeight()); |
| 1448 | data.vertices[0] = ScreenRectVertex(x, y, texcoords.top * scale_u, left_start + left * scale_v); | 1469 | |
| 1449 | data.vertices[1] = | 1470 | data.vertices[0] = ScreenRectVertex(x, y, left, top); |
| 1450 | ScreenRectVertex(x + w, y, texcoords.bottom * scale_u, left_start + left * scale_v); | 1471 | data.vertices[1] = ScreenRectVertex(x + w, y, right, top); |
| 1451 | data.vertices[2] = | 1472 | data.vertices[2] = ScreenRectVertex(x, y + h, left, bottom); |
| 1452 | ScreenRectVertex(x, y + h, texcoords.top * scale_u, left_start + right * scale_v); | 1473 | data.vertices[3] = ScreenRectVertex(x + w, y + h, right, bottom); |
| 1453 | data.vertices[3] = | ||
| 1454 | ScreenRectVertex(x + w, y + h, texcoords.bottom * scale_u, left_start + right * scale_v); | ||
| 1455 | } | 1474 | } |
| 1456 | 1475 | ||
| 1457 | void BlitScreen::CreateSMAA(VkExtent2D smaa_size) { | 1476 | void BlitScreen::CreateSMAA(VkExtent2D smaa_size) { |
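[Editor's note] With the crop pre-normalized, the quad mapping above is easy to sanity-check by hand. Assuming a crop covering the top half of the frame (left 0, top 0, right 1, bottom 0.5), the four vertices come out as:

    // Worked example with an assumed crop of the top half of the frame.
    const Common::Rectangle<f32> crop{0.f, 0.f, 1.f, 0.5f};
    // data.vertices[0] = ScreenRectVertex(x,     y,     0.f, 0.f);   // top-left
    // data.vertices[1] = ScreenRectVertex(x + w, y,     1.f, 0.f);   // top-right
    // data.vertices[2] = ScreenRectVertex(x,     y + h, 0.f, 0.5f);  // bottom-left
    // data.vertices[3] = ScreenRectVertex(x + w, y + h, 1.f, 0.5f);  // bottom-right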
diff --git a/src/video_core/renderer_vulkan/vk_fsr.cpp b/src/video_core/renderer_vulkan/vk_fsr.cpp index ce8f3f3c2..f7a05fbc0 100644 --- a/src/video_core/renderer_vulkan/vk_fsr.cpp +++ b/src/video_core/renderer_vulkan/vk_fsr.cpp | |||
| @@ -34,7 +34,7 @@ FSR::FSR(const Device& device_, MemoryAllocator& memory_allocator_, size_t image | |||
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view, | 36 | VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view, |
| 37 | VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect) { | 37 | VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect) { |
| 38 | 38 | ||
| 39 | UpdateDescriptorSet(image_index, image_view); | 39 | UpdateDescriptorSet(image_index, image_view); |
| 40 | 40 | ||
| @@ -61,15 +61,21 @@ VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView imag | |||
| 61 | 61 | ||
| 62 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *easu_pipeline); | 62 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *easu_pipeline); |
| 63 | 63 | ||
| 64 | const f32 input_image_width = static_cast<f32>(input_image_extent.width); | ||
| 65 | const f32 input_image_height = static_cast<f32>(input_image_extent.height); | ||
| 66 | const f32 output_image_width = static_cast<f32>(output_size.width); | ||
| 67 | const f32 output_image_height = static_cast<f32>(output_size.height); | ||
| 68 | const f32 viewport_width = (crop_rect.right - crop_rect.left) * input_image_width; | ||
| 69 | const f32 viewport_x = crop_rect.left * input_image_width; | ||
| 70 | const f32 viewport_height = (crop_rect.bottom - crop_rect.top) * input_image_height; | ||
| 71 | const f32 viewport_y = crop_rect.top * input_image_height; | ||
| 72 | |||
| 64 | std::array<u32, 4 * 4> push_constants; | 73 | std::array<u32, 4 * 4> push_constants; |
| 65 | FsrEasuConOffset( | 74 | FsrEasuConOffset(push_constants.data() + 0, push_constants.data() + 4, |
| 66 | push_constants.data() + 0, push_constants.data() + 4, push_constants.data() + 8, | 75 | push_constants.data() + 8, push_constants.data() + 12, |
| 67 | push_constants.data() + 12, | 76 | |
| 68 | 77 | viewport_width, viewport_height, input_image_width, input_image_height, | |
| 69 | static_cast<f32>(crop_rect.GetWidth()), static_cast<f32>(crop_rect.GetHeight()), | 78 | output_image_width, output_image_height, viewport_x, viewport_y); |
| 70 | static_cast<f32>(input_image_extent.width), static_cast<f32>(input_image_extent.height), | ||
| 71 | static_cast<f32>(output_size.width), static_cast<f32>(output_size.height), | ||
| 72 | static_cast<f32>(crop_rect.left), static_cast<f32>(crop_rect.top)); | ||
| 73 | cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants); | 79 | cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants); |
| 74 | 80 | ||
| 75 | { | 81 | { |
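[Editor's note] The push-constant change feeds FsrEasuConOffset a viewport derived from the normalized crop instead of raw pixel coordinates. A quick numeric check of that math, assuming a 1280x720 input cropped to the middle 80% horizontally:

    const VkExtent2D input_image_extent{.width = 1280, .height = 720};
    const Common::Rectangle<f32> crop_rect{0.1f, 0.f, 0.9f, 1.f};
    // viewport_x      = 0.1f * 1280        = 128
    // viewport_width  = (0.9f - 0.1f) * 1280 = 1024
    // viewport_y      = 0.0f * 720         = 0
    // viewport_height = (1.0f - 0.0f) * 720  = 720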
diff --git a/src/video_core/renderer_vulkan/vk_fsr.h b/src/video_core/renderer_vulkan/vk_fsr.h index 8bb9fc23a..3505c1416 100644 --- a/src/video_core/renderer_vulkan/vk_fsr.h +++ b/src/video_core/renderer_vulkan/vk_fsr.h | |||
| @@ -17,7 +17,7 @@ public: | |||
| 17 | explicit FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count, | 17 | explicit FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count, |
| 18 | VkExtent2D output_size); | 18 | VkExtent2D output_size); |
| 19 | VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view, | 19 | VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view, |
| 20 | VkExtent2D input_image_extent, const Common::Rectangle<int>& crop_rect); | 20 | VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect); |
| 21 | 21 | ||
| 22 | private: | 22 | private: |
| 23 | void CreateDescriptorPool(); | 23 | void CreateDescriptorPool(); |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 3983b2eb7..c0e8431e4 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -82,7 +82,7 @@ VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t in | |||
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | if (y_negate) { | 84 | if (y_negate) { |
| 85 | y += height; | 85 | y += conv(static_cast<f32>(regs.surface_clip.height)); |
| 86 | height = -height; | 86 | height = -height; |
| 87 | } | 87 | } |
| 88 | 88 | ||
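[Editor's note] This one-line rasterizer fix changes what the negative-height viewport flip pivots around: the full surface clip height instead of the viewport's own height, which only coincide when the viewport starts at y = 0 and spans the whole surface. A minimal sketch of the pattern (names illustrative, not the actual function):

    // Negative-height viewport flip, VK_KHR_maintenance1 style.
    VkViewport FlipY(VkViewport vp, float surface_height) {
        vp.y += surface_height; // previously vp.y += vp.height, wrong for partial viewports
        vp.height = -vp.height;
        return vp;
    }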
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h index 74ec4f771..1589ba057 100644 --- a/src/yuzu/configuration/config.h +++ b/src/yuzu/configuration/config.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2014 Citra Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/configuration/configure_camera.h b/src/yuzu/configuration/configure_camera.h index 9a90512b3..3d822da7b 100644 --- a/src/yuzu/configuration/configure_camera.h +++ b/src/yuzu/configuration/configure_camera.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // Text : Copyright 2022 yuzu Emulator Project | 1 | // Text : Copyright 2022 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-3.0-or-later | 2 | // SPDX-License-Identifier: GPL-3.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp index 3dcad2701..02e23cce6 100644 --- a/src/yuzu/configuration/configure_input.cpp +++ b/src/yuzu/configuration/configure_input.cpp | |||
| @@ -152,7 +152,7 @@ void ConfigureInput::Initialize(InputCommon::InputSubsystem* input_subsystem, | |||
| 152 | connect(player_controllers[0], &ConfigureInputPlayer::HandheldStateChanged, | 152 | connect(player_controllers[0], &ConfigureInputPlayer::HandheldStateChanged, |
| 153 | [this](bool is_handheld) { UpdateDockedState(is_handheld); }); | 153 | [this](bool is_handheld) { UpdateDockedState(is_handheld); }); |
| 154 | 154 | ||
| 155 | advanced = new ConfigureInputAdvanced(this); | 155 | advanced = new ConfigureInputAdvanced(hid_core, this); |
| 156 | ui->tabAdvanced->setLayout(new QHBoxLayout(ui->tabAdvanced)); | 156 | ui->tabAdvanced->setLayout(new QHBoxLayout(ui->tabAdvanced)); |
| 157 | ui->tabAdvanced->layout()->addWidget(advanced); | 157 | ui->tabAdvanced->layout()->addWidget(advanced); |
| 158 | 158 | ||
diff --git a/src/yuzu/configuration/configure_input.h b/src/yuzu/configuration/configure_input.h index 136cd3a0a..beb503dae 100644 --- a/src/yuzu/configuration/configure_input.h +++ b/src/yuzu/configuration/configure_input.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/configuration/configure_input_advanced.cpp b/src/yuzu/configuration/configure_input_advanced.cpp index 3cfd5d439..441cea3f6 100644 --- a/src/yuzu/configuration/configure_input_advanced.cpp +++ b/src/yuzu/configuration/configure_input_advanced.cpp | |||
| @@ -4,11 +4,13 @@ | |||
| 4 | #include <QColorDialog> | 4 | #include <QColorDialog> |
| 5 | #include "common/settings.h" | 5 | #include "common/settings.h" |
| 6 | #include "core/core.h" | 6 | #include "core/core.h" |
| 7 | #include "core/hid/emulated_controller.h" | ||
| 8 | #include "core/hid/hid_core.h" | ||
| 7 | #include "ui_configure_input_advanced.h" | 9 | #include "ui_configure_input_advanced.h" |
| 8 | #include "yuzu/configuration/configure_input_advanced.h" | 10 | #include "yuzu/configuration/configure_input_advanced.h" |
| 9 | 11 | ||
| 10 | ConfigureInputAdvanced::ConfigureInputAdvanced(QWidget* parent) | 12 | ConfigureInputAdvanced::ConfigureInputAdvanced(Core::HID::HIDCore& hid_core_, QWidget* parent) |
| 11 | : QWidget(parent), ui(std::make_unique<Ui::ConfigureInputAdvanced>()) { | 13 | : QWidget(parent), ui(std::make_unique<Ui::ConfigureInputAdvanced>()), hid_core{hid_core_} { |
| 12 | ui->setupUi(this); | 14 | ui->setupUi(this); |
| 13 | 15 | ||
| 14 | controllers_color_buttons = {{ | 16 | controllers_color_buttons = {{ |
| @@ -123,6 +125,8 @@ void ConfigureInputAdvanced::ApplyConfiguration() { | |||
| 123 | player.button_color_left = colors[1]; | 125 | player.button_color_left = colors[1]; |
| 124 | player.body_color_right = colors[2]; | 126 | player.body_color_right = colors[2]; |
| 125 | player.button_color_right = colors[3]; | 127 | player.button_color_right = colors[3]; |
| 128 | |||
| 129 | hid_core.GetEmulatedControllerByIndex(player_idx)->ReloadColorsFromSettings(); | ||
| 126 | } | 130 | } |
| 127 | 131 | ||
| 128 | Settings::values.debug_pad_enabled = ui->debug_enabled->isChecked(); | 132 | Settings::values.debug_pad_enabled = ui->debug_enabled->isChecked(); |
diff --git a/src/yuzu/configuration/configure_input_advanced.h b/src/yuzu/configuration/configure_input_advanced.h index fc1230284..41f822c4a 100644 --- a/src/yuzu/configuration/configure_input_advanced.h +++ b/src/yuzu/configuration/configure_input_advanced.h | |||
| @@ -14,11 +14,15 @@ namespace Ui { | |||
| 14 | class ConfigureInputAdvanced; | 14 | class ConfigureInputAdvanced; |
| 15 | } | 15 | } |
| 16 | 16 | ||
| 17 | namespace Core::HID { | ||
| 18 | class HIDCore; | ||
| 19 | } // namespace Core::HID | ||
| 20 | |||
| 17 | class ConfigureInputAdvanced : public QWidget { | 21 | class ConfigureInputAdvanced : public QWidget { |
| 18 | Q_OBJECT | 22 | Q_OBJECT |
| 19 | 23 | ||
| 20 | public: | 24 | public: |
| 21 | explicit ConfigureInputAdvanced(QWidget* parent = nullptr); | 25 | explicit ConfigureInputAdvanced(Core::HID::HIDCore& hid_core_, QWidget* parent = nullptr); |
| 22 | ~ConfigureInputAdvanced() override; | 26 | ~ConfigureInputAdvanced() override; |
| 23 | 27 | ||
| 24 | void ApplyConfiguration(); | 28 | void ApplyConfiguration(); |
| @@ -44,4 +48,6 @@ private: | |||
| 44 | 48 | ||
| 45 | std::array<std::array<QColor, 4>, 8> controllers_colors; | 49 | std::array<std::array<QColor, 4>, 8> controllers_colors; |
| 46 | std::array<std::array<QPushButton*, 4>, 8> controllers_color_buttons; | 50 | std::array<std::array<QPushButton*, 4>, 8> controllers_color_buttons; |
| 51 | |||
| 52 | Core::HID::HIDCore& hid_core; | ||
| 47 | }; | 53 | }; |
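[Editor's note] Threading Core::HID::HIDCore through the constructor lets ApplyConfiguration() push freshly chosen colors straight to the emulated controllers instead of waiting for a full settings reload. A hypothetical caller-side sketch; the real call site is the configure_input.cpp hunk above:

    // Hypothetical construction; configure_input.cpp passes its own hid_core.
    auto* advanced = new ConfigureInputAdvanced(system.HIDCore(), parent);
    // Applying the dialog then refreshes each controller in place:
    // hid_core.GetEmulatedControllerByIndex(player_idx)->ReloadColorsFromSettings();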
diff --git a/src/yuzu/configuration/configure_input_player.h b/src/yuzu/configuration/configure_input_player.h index d3255d2b4..fda09e925 100644 --- a/src/yuzu/configuration/configure_input_player.h +++ b/src/yuzu/configuration/configure_input_player.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/configuration/configure_per_game.h b/src/yuzu/configuration/configure_per_game.h index 1a727f32c..cc2513001 100644 --- a/src/yuzu/configuration/configure_per_game.h +++ b/src/yuzu/configuration/configure_per_game.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/configuration/configure_profile_manager.cpp b/src/yuzu/configuration/configure_profile_manager.cpp index a47089988..6d2219bf5 100644 --- a/src/yuzu/configuration/configure_profile_manager.cpp +++ b/src/yuzu/configuration/configure_profile_manager.cpp | |||
| @@ -306,10 +306,10 @@ void ConfigureProfileManager::SetUserImage() { | |||
| 306 | return; | 306 | return; |
| 307 | } | 307 | } |
| 308 | 308 | ||
| 309 | // Some games crash when the profile image is too big. Resize any image bigger than 256x256 | 309 | // Profile image must be 256x256 |
| 310 | QImage image(image_path); | 310 | QImage image(image_path); |
| 311 | if (image.width() > 256 || image.height() > 256) { | 311 | if (image.width() != 256 || image.height() != 256) { |
| 312 | image = image.scaled(256, 256, Qt::KeepAspectRatio); | 312 | image = image.scaled(256, 256, Qt::KeepAspectRatioByExpanding, Qt::SmoothTransformation); |
| 313 | if (!image.save(image_path)) { | 313 | if (!image.save(image_path)) { |
| 314 | QMessageBox::warning(this, tr("Error resizing user image"), | 314 | QMessageBox::warning(this, tr("Error resizing user image"), |
| 315 | tr("Unable to resize image")); | 315 | tr("Unable to resize image")); |
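[Editor's note] The new check is stricter: anything that is not exactly 256x256 is rescaled, and KeepAspectRatioByExpanding covers the full 256x256 box rather than fitting inside it. Expanding can leave one dimension larger than 256, so a centered crop (hypothetical, not part of this diff) would bring the icon to the exact size:

    QImage image(image_path);
    if (image.width() != 256 || image.height() != 256) {
        image = image.scaled(256, 256, Qt::KeepAspectRatioByExpanding,
                             Qt::SmoothTransformation);
        // Hypothetical follow-up, not in the diff: crop the overhang so the
        // saved icon is exactly 256x256.
        image = image.copy((image.width() - 256) / 2, (image.height() - 256) / 2, 256, 256);
    }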
diff --git a/src/yuzu/configuration/configure_ringcon.h b/src/yuzu/configuration/configure_ringcon.h index b23c27906..6fd95e2b8 100644 --- a/src/yuzu/configuration/configure_ringcon.h +++ b/src/yuzu/configuration/configure_ringcon.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/configuration/configure_tas.h b/src/yuzu/configuration/configure_tas.h index 4a6b0ba4e..a91891906 100644 --- a/src/yuzu/configuration/configure_tas.h +++ b/src/yuzu/configuration/configure_tas.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project | 1 | // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/configuration/configure_touchscreen_advanced.h b/src/yuzu/configuration/configure_touchscreen_advanced.h index 034dc0d46..b6fdffdc8 100644 --- a/src/yuzu/configuration/configure_touchscreen_advanced.h +++ b/src/yuzu/configuration/configure_touchscreen_advanced.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project | 1 | // SPDX-FileCopyrightText: 2016 Citra Emulator Project |
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | 2 | // SPDX-License-Identifier: GPL-2.0-or-later |
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index 1bf173efb..f077d7f9c 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp | |||
| @@ -1908,7 +1908,11 @@ void GMainWindow::ConfigureFilesystemProvider(const std::string& filepath) { | |||
| 1908 | void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t program_index, | 1908 | void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t program_index, |
| 1909 | StartGameType type, AmLaunchType launch_type) { | 1909 | StartGameType type, AmLaunchType launch_type) { |
| 1910 | LOG_INFO(Frontend, "yuzu starting..."); | 1910 | LOG_INFO(Frontend, "yuzu starting..."); |
| 1911 | StoreRecentFile(filename); // Put the filename on top of the list | 1911 | |
| 1912 | if (program_id == 0 || | ||
| 1913 | program_id > static_cast<u64>(Service::AM::Applets::AppletProgramId::MaxProgramId)) { | ||
| 1914 | StoreRecentFile(filename); // Put the filename on top of the list | ||
| 1915 | } | ||
| 1912 | 1916 | ||
| 1913 | // Save configurations | 1917 | // Save configurations |
| 1914 | UpdateUISettings(); | 1918 | UpdateUISettings(); |
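[Editor's note] The guard above reads a little inverted: the recent-files list is updated only when the program ID falls outside the system-applet range. An equivalent formulation, assuming MaxProgramId is the top of that range:

    const bool is_system_applet =
        program_id != 0 &&
        program_id <= static_cast<u64>(Service::AM::Applets::AppletProgramId::MaxProgramId);
    if (!is_system_applet) {
        StoreRecentFile(filename); // applet launches stay out of the recent list
    }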
| @@ -4273,7 +4277,7 @@ void GMainWindow::OnToggleStatusBar() { | |||
| 4273 | } | 4277 | } |
| 4274 | 4278 | ||
| 4275 | void GMainWindow::OnAlbum() { | 4279 | void GMainWindow::OnAlbum() { |
| 4276 | constexpr u64 AlbumId = 0x010000000000100Dull; | 4280 | constexpr u64 AlbumId = static_cast<u64>(Service::AM::Applets::AppletProgramId::PhotoViewer); |
| 4277 | auto bis_system = system->GetFileSystemController().GetSystemNANDContents(); | 4281 | auto bis_system = system->GetFileSystemController().GetSystemNANDContents(); |
| 4278 | if (!bis_system) { | 4282 | if (!bis_system) { |
| 4279 | QMessageBox::warning(this, tr("No firmware available"), | 4283 | QMessageBox::warning(this, tr("No firmware available"), |
| @@ -4292,11 +4296,11 @@ void GMainWindow::OnAlbum() { | |||
| 4292 | 4296 | ||
| 4293 | const auto filename = QString::fromStdString(album_nca->GetFullPath()); | 4297 | const auto filename = QString::fromStdString(album_nca->GetFullPath()); |
| 4294 | UISettings::values.roms_path = QFileInfo(filename).path(); | 4298 | UISettings::values.roms_path = QFileInfo(filename).path(); |
| 4295 | BootGame(filename); | 4299 | BootGame(filename, AlbumId); |
| 4296 | } | 4300 | } |
| 4297 | 4301 | ||
| 4298 | void GMainWindow::OnCabinet(Service::NFP::CabinetMode mode) { | 4302 | void GMainWindow::OnCabinet(Service::NFP::CabinetMode mode) { |
| 4299 | constexpr u64 CabinetId = 0x0100000000001002ull; | 4303 | constexpr u64 CabinetId = static_cast<u64>(Service::AM::Applets::AppletProgramId::Cabinet); |
| 4300 | auto bis_system = system->GetFileSystemController().GetSystemNANDContents(); | 4304 | auto bis_system = system->GetFileSystemController().GetSystemNANDContents(); |
| 4301 | if (!bis_system) { | 4305 | if (!bis_system) { |
| 4302 | QMessageBox::warning(this, tr("No firmware available"), | 4306 | QMessageBox::warning(this, tr("No firmware available"), |
| @@ -4316,11 +4320,11 @@ void GMainWindow::OnCabinet(Service::NFP::CabinetMode mode) { | |||
| 4316 | 4320 | ||
| 4317 | const auto filename = QString::fromStdString(cabinet_nca->GetFullPath()); | 4321 | const auto filename = QString::fromStdString(cabinet_nca->GetFullPath()); |
| 4318 | UISettings::values.roms_path = QFileInfo(filename).path(); | 4322 | UISettings::values.roms_path = QFileInfo(filename).path(); |
| 4319 | BootGame(filename); | 4323 | BootGame(filename, CabinetId); |
| 4320 | } | 4324 | } |
| 4321 | 4325 | ||
| 4322 | void GMainWindow::OnMiiEdit() { | 4326 | void GMainWindow::OnMiiEdit() { |
| 4323 | constexpr u64 MiiEditId = 0x0100000000001009ull; | 4327 | constexpr u64 MiiEditId = static_cast<u64>(Service::AM::Applets::AppletProgramId::MiiEdit); |
| 4324 | auto bis_system = system->GetFileSystemController().GetSystemNANDContents(); | 4328 | auto bis_system = system->GetFileSystemController().GetSystemNANDContents(); |
| 4325 | if (!bis_system) { | 4329 | if (!bis_system) { |
| 4326 | QMessageBox::warning(this, tr("No firmware available"), | 4330 | QMessageBox::warning(this, tr("No firmware available"), |
| @@ -4339,7 +4343,7 @@ void GMainWindow::OnMiiEdit() { | |||
| 4339 | 4343 | ||
| 4340 | const auto filename = QString::fromStdString((mii_applet_nca->GetFullPath())); | 4344 | const auto filename = QString::fromStdString((mii_applet_nca->GetFullPath())); |
| 4341 | UISettings::values.roms_path = QFileInfo(filename).path(); | 4345 | UISettings::values.roms_path = QFileInfo(filename).path(); |
| 4342 | BootGame(filename); | 4346 | BootGame(filename, MiiEditId); |
| 4343 | } | 4347 | } |
| 4344 | 4348 | ||
| 4345 | void GMainWindow::OnCaptureScreenshot() { | 4349 | void GMainWindow::OnCaptureScreenshot() { |
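[Editor's note] The replaced literals pin down several AppletProgramId values, so the enum presumably contains at least these entries. This is a reconstruction from the old lines above, not the actual header:

    enum class AppletProgramId : u64 {
        Cabinet = 0x0100000000001002ULL,
        MiiEdit = 0x0100000000001009ULL,
        PhotoViewer = 0x010000000000100DULL,
        // MaxProgramId bounds the applet range used by BootGame's recent-files
        // guard; its value is not visible in this diff.
    };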
diff --git a/src/yuzu/vk_device_info.cpp b/src/yuzu/vk_device_info.cpp index 92f10d315..ab0d39c25 100644 --- a/src/yuzu/vk_device_info.cpp +++ b/src/yuzu/vk_device_info.cpp | |||
| @@ -31,6 +31,7 @@ void PopulateRecords(std::vector<Record>& records, QWindow* window) try { | |||
| 31 | // Create a test window with a Vulkan surface type for checking present modes. | 31 | // Create a test window with a Vulkan surface type for checking present modes. |
| 32 | QWindow test_window(window); | 32 | QWindow test_window(window); |
| 33 | test_window.setSurfaceType(QWindow::VulkanSurface); | 33 | test_window.setSurfaceType(QWindow::VulkanSurface); |
| 34 | test_window.create(); | ||
| 34 | auto wsi = QtCommon::GetWindowSystemInfo(&test_window); | 35 | auto wsi = QtCommon::GetWindowSystemInfo(&test_window); |
| 35 | 36 | ||
| 36 | vk::InstanceDispatch dld; | 37 | vk::InstanceDispatch dld; |
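[Editor's note] The added create() call matters because Qt allocates the native platform window lazily; querying window-system info before the platform window exists can hand Vulkan a null handle. The pattern in isolation, with `window` being the parent from the surrounding function:

    QWindow test_window(window);
    test_window.setSurfaceType(QWindow::VulkanSurface);
    test_window.create(); // allocate the native window before querying its WSI handles
    auto wsi = QtCommon::GetWindowSystemInfo(&test_window);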