From 8b9840b32f350455dbfe59542e4fc2c3a88f5351 Mon Sep 17 00:00:00 2001 From: v00863305 Date: Wed, 10 Jan 2024 14:00:33 +0300 Subject: [PATCH 1/5] Customize unlocked frame culling Fine-grained reduction of GPU texture memory consumption by a browser application with multiple offscreen frames (tabs) can be achieved by using custom frame eviction limits (periodic delay, explicit count limit), which make it possible to release tile GPU backing resources (shared images) from evicted Viz surfaces (compositor frames). - periodic delay (5 mins) - enabled by default - explicit count limit (10 frames) - disabled by default To configure custom frame eviction, use the following features: --enable-features=AggressiveFrameCulling:delay/5m,ExplicitFrameCullingLimit:max-count/10 Signed-off-by: Volykhin Andrei Change-Id: I6505f81b335d13edc146a26505805b04524658d5 --- components/viz/client/frame_eviction_manager.cc | 11 +++++++++-- components/viz/common/features.cc | 11 +++++++++++ components/viz/common/features.h | 8 ++++++++ 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/components/viz/client/frame_eviction_manager.cc b/components/viz/client/frame_eviction_manager.cc index bc70b77c66..8a722b997b 100644 --- a/components/viz/client/frame_eviction_manager.cc +++ b/components/viz/client/frame_eviction_manager.cc @@ -105,7 +105,7 @@ void FrameEvictionManager::RegisterUnlockedFrame( // Unretained: `idle_frames_culling_timer_` is a member of `this`, doesn't // outlive it, and cancels the task in its destructor. idle_frames_culling_timer_.Start( - FROM_HERE, kPeriodicCullingDelay, + FROM_HERE, features::kAggressiveFrameCullingDelay.Get(), base::BindRepeating(&FrameEvictionManager::CullOldUnlockedFrames, base::Unretained(this))); } @@ -161,7 +161,13 @@ FrameEvictionManager::FrameEvictionManager() switches::kMaxNumberOfSavedFrames)) { max_number_of_saved_frames_ = kMaxNumberOfSavedFrames; } +#endif +#if BUILDFLAG(IS_OHOS) + if (base::FeatureList::IsEnabled(features::kExplicitFrameCullingLimit)) { + max_number_of_saved_frames_ = + std::max(1, features::kExplicitFrameCullingLimitMaxCount.Get()); + } #endif } @@ -193,7 +199,8 @@ void FrameEvictionManager::CullOldUnlockedFrames() { auto now = clock_->NowTicks(); while (!unlocked_frames_.empty() && - now - unlocked_frames_.back().second >= kPeriodicCullingDelay) { + now - unlocked_frames_.back().second >= + features::kAggressiveFrameCullingDelay.Get()) { size_t old_size = unlocked_frames_.size(); auto* frame = unlocked_frames_.back().first; frame->EvictCurrentFrame(); diff --git a/components/viz/common/features.cc b/components/viz/common/features.cc index ebc18015f2..93fdfe7c76 100644 --- a/components/viz/common/features.cc +++ b/components/viz/common/features.cc @@ -197,6 +197,8 @@ BASE_FEATURE(kAllowUndamagedNonrootRenderPassToSkip, BASE_FEATURE(kAggressiveFrameCulling, "AggressiveFrameCulling", base::FEATURE_ENABLED_BY_DEFAULT); +const base::FeatureParam kAggressiveFrameCullingDelay{ + &kAggressiveFrameCulling, "delay", base::Minutes(5)}; // If enabled, do not rely on surface garbage collection to happen // periodically, but trigger it eagerly, to avoid missing calls. @@ -259,6 +261,15 @@ BASE_FEATURE(kOnBeginFrameAllowLateAcks, "OnBeginFrameAllowLateAcks", base::FEATURE_DISABLED_BY_DEFAULT); +#if BUILDFLAG(IS_OHOS) +// If enabled, define explicit culling limit for *all* frames.
+BASE_FEATURE(kExplicitFrameCullingLimit, + "ExplicitFrameCullingLimit", + base::FEATURE_DISABLED_BY_DEFAULT); +const base::FeatureParam kExplicitFrameCullingLimitMaxCount{ + &kExplicitFrameCullingLimit, "max-count", 10}; +#endif + bool IsDelegatedCompositingEnabled() { return base::FeatureList::IsEnabled(kDelegatedCompositing); } diff --git a/components/viz/common/features.h b/components/viz/common/features.h index 2fc330a441..0fb69a339d 100644 --- a/components/viz/common/features.h +++ b/components/viz/common/features.h @@ -59,6 +59,8 @@ VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kDrawPredictedInkPoint); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kAllowBypassRenderPassQuads); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kAllowUndamagedNonrootRenderPassToSkip); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kAggressiveFrameCulling); +VIZ_COMMON_EXPORT extern const base::FeatureParam + kAggressiveFrameCullingDelay; VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kEagerSurfaceGarbageCollection); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kOverrideThrottledFrameRateParams); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kRendererAllocatesImages); @@ -67,6 +69,12 @@ VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kEvictSubtree); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kOnBeginFrameAcks); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kOnBeginFrameAllowLateAcks); +#if BUILDFLAG(IS_OHOS) +VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kExplicitFrameCullingLimit); +VIZ_COMMON_EXPORT extern const base::FeatureParam + kExplicitFrameCullingLimitMaxCount; +#endif + VIZ_COMMON_EXPORT extern const char kDraw1Point12Ms[]; VIZ_COMMON_EXPORT extern const char kDraw2Points6Ms[]; VIZ_COMMON_EXPORT extern const char kDraw1Point6Ms[]; -- Gitee From e4af894d407f5df91841da7553233113659767f1 Mon Sep 17 00:00:00 2001 From: v00863305 Date: Fri, 2 Feb 2024 09:50:31 +0300 Subject: [PATCH 2/5] First frame activation deadline Allow to use the specified deadline policy on the first frame drawing (show with visibility) to synchronize web content with browser UI. Having HarmonyOS display a placeholder (optional) for a longer period of time is preferable to drawing nothing, and the first frame can take a while on low-end systems. --enable-features=FirstFrameActivationDeadline OHOS external begin frame source should adjust MISSED begin frame arguments to the current time otherwise surface activation deadline will be resolved incorrectly. VSync (MISSED) -> ....do nothing ... -> NeedsBeginFrame -> BeginFrame (with old MISSED begin frame args) -> ... 
-> CommitFrame (deadline resolved against old MISSED begin frame args) Signed-off-by: Volykhin Andrei Change-Id: I1cad430d838cff67ed735e2af2e901b68a4f2725 --- components/viz/common/features.cc | 8 +++++++ components/viz/common/features.h | 3 +++ .../common/frame_sinks/begin_frame_source.cc | 7 +++--- .../external_begin_frame_source_ohos.cc | 24 +++++++++++++++++++ .../external_begin_frame_source_ohos.h | 3 +++ components/viz/service/surfaces/surface.cc | 4 ++++ .../renderer_host/delegated_frame_host.cc | 21 ++++++++++++++++ 7 files changed, 66 insertions(+), 4 deletions(-) diff --git a/components/viz/common/features.cc b/components/viz/common/features.cc index 93fdfe7c76..8b1908aab7 100644 --- a/components/viz/common/features.cc +++ b/components/viz/common/features.cc @@ -268,6 +268,14 @@ BASE_FEATURE(kExplicitFrameCullingLimit, base::FEATURE_DISABLED_BY_DEFAULT); const base::FeatureParam kExplicitFrameCullingLimitMaxCount{ &kExplicitFrameCullingLimit, "max-count", 10}; + +// If enabled, wait to activate a surface with dependencies on the first frame +// drawing with specified deadline. +BASE_FEATURE(kFirstFrameActivationDeadline, + "FirstFrameActivationDeadline", + base::FEATURE_ENABLED_BY_DEFAULT); +const base::FeatureParam kFirstFrameActivationDeadlineTimeout{ + &kFirstFrameActivationDeadline, "timeout", base::Seconds(5)}; #endif bool IsDelegatedCompositingEnabled() { diff --git a/components/viz/common/features.h b/components/viz/common/features.h index 0fb69a339d..15b6f52c1a 100644 --- a/components/viz/common/features.h +++ b/components/viz/common/features.h @@ -73,6 +73,9 @@ VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kOnBeginFrameAllowLateAcks); VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kExplicitFrameCullingLimit); VIZ_COMMON_EXPORT extern const base::FeatureParam kExplicitFrameCullingLimitMaxCount; +VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kFirstFrameActivationDeadline); +VIZ_COMMON_EXPORT extern const base::FeatureParam + kFirstFrameActivationDeadlineTimeout; #endif VIZ_COMMON_EXPORT extern const char kDraw1Point12Ms[]; diff --git a/components/viz/common/frame_sinks/begin_frame_source.cc b/components/viz/common/frame_sinks/begin_frame_source.cc index 2a33b47794..ac92c8bbcc 100644 --- a/components/viz/common/frame_sinks/begin_frame_source.cc +++ b/components/viz/common/frame_sinks/begin_frame_source.cc @@ -491,10 +491,9 @@ void ExternalBeginFrameSource::OnBeginFrame(const BeginFrameArgs& args) { return; } - TRACE_EVENT2( - "viz", "ExternalBeginFrameSource::OnBeginFrame", "frame_time", - last_begin_frame_args_.frame_time.since_origin().InMicroseconds(), - "interval", last_begin_frame_args_.interval.InMicroseconds()); + TRACE_EVENT2("viz", "ExternalBeginFrameSource::OnBeginFrame", "frame_time", + args.frame_time.since_origin().InMicroseconds(), "interval", + args.interval.InMicroseconds()); last_begin_frame_args_ = args; base::flat_set observers(observers_); diff --git a/components/viz/service/frame_sinks/external_begin_frame_source_ohos.cc b/components/viz/service/frame_sinks/external_begin_frame_source_ohos.cc index d37717a485..3ce2f59504 100644 --- a/components/viz/service/frame_sinks/external_begin_frame_source_ohos.cc +++ b/components/viz/service/frame_sinks/external_begin_frame_source_ohos.cc @@ -229,6 +229,30 @@ ReportLossFrame::GetInstance()->SetVsyncPeriod(vsync_period_); } } +BeginFrameArgs ExternalBeginFrameSourceOHOS::GetMissedBeginFrameArgs( + BeginFrameObserver* obs) { + auto frame_time = last_begin_frame_args_.frame_time; + auto interval = last_begin_frame_args_.interval; + auto 
now = base::TimeTicks::Now(); + + if (last_begin_frame_args_.IsValid()) { + frame_time = now.SnappedToNextTick(frame_time, interval) - interval; + } else { + // Create BeginFrameArgs for now so that we don't have to wait until vsync. + frame_time = now; + interval = BeginFrameArgs::DefaultInterval(); + } + + // Don't create new args unless we've actually moved past the previous frame. + if (!last_begin_frame_args_.IsValid() || + frame_time > last_begin_frame_args_.frame_time) { + last_begin_frame_args_ = begin_frame_args_generator_.GenerateBeginFrameArgs( + source_id(), frame_time, frame_time + interval, interval); + } + + return ExternalBeginFrameSource::GetMissedBeginFrameArgs(obs); +} + void ExternalBeginFrameSourceOHOS::OnNeedsBeginFrames(bool needs_begin_frames) { SetEnabled(needs_begin_frames); } diff --git a/components/viz/service/frame_sinks/external_begin_frame_source_ohos.h b/components/viz/service/frame_sinks/external_begin_frame_source_ohos.h index e56bf67ed6..7cb1304634 100644 --- a/components/viz/service/frame_sinks/external_begin_frame_source_ohos.h +++ b/components/viz/service/frame_sinks/external_begin_frame_source_ohos.h @@ -63,6 +63,9 @@ class VIZ_SERVICE_EXPORT ExternalBeginFrameSourceOHOS void ResetVSyncFrequency() override; private: + // ExternalBeginFrameSource overrides. + BeginFrameArgs GetMissedBeginFrameArgs(BeginFrameObserver* obs) override; + // ExternalBeginFrameSourceClient implementation. void OnNeedsBeginFrames(bool needs_begin_frames) override; diff --git a/components/viz/service/surfaces/surface.cc b/components/viz/service/surfaces/surface.cc index 499c903155..e7282a7b0e 100644 --- a/components/viz/service/surfaces/surface.cc +++ b/components/viz/service/surfaces/surface.cc @@ -354,6 +354,10 @@ void Surface::OnActivationDependencyResolved( blocking_allocation_groups_.erase(group); if (!activation_dependencies_.empty()) return; + + TRACE_EVENT_NESTABLE_ASYNC_END0("viz", "SurfaceQueuedPending", + TRACE_ID_LOCAL(this)); + // All blockers have been cleared. The surface can be activated now. ActivatePendingFrame(); } diff --git a/content/browser/renderer_host/delegated_frame_host.cc b/content/browser/renderer_host/delegated_frame_host.cc index 65766e9570..7b5062a283 100644 --- a/content/browser/renderer_host/delegated_frame_host.cc +++ b/content/browser/renderer_host/delegated_frame_host.cc @@ -39,6 +39,21 @@ namespace { // factor. constexpr float kFrameContentCaptureQuality = 0.4f; +#if BUILDFLAG(IS_OHOS) +cc::DeadlinePolicy FirstFrameDeadlinePolicy() { + if (base::FeatureList::IsEnabled( + features::kFirstFrameActivationDeadline)) { + // Wait up to deadline timeout for the first frame to be produced. + int64_t deadline_in_frames = base::ClampRound( + features::kFirstFrameActivationDeadlineTimeout.Get() / + viz::BeginFrameArgs::DefaultInterval()); + return cc::DeadlinePolicy::UseSpecifiedDeadline(deadline_in_frames); + } + + return cc::DeadlinePolicy::UseDefaultDeadline(); +} +#endif + } // namespace //////////////////////////////////////////////////////////////////////////////// @@ -96,10 +111,16 @@ void DelegatedFrameHost::WasShown( std::move(record_tab_switch_time_request))); } +#if BUILDFLAG(IS_OHOS) + // Use the specified deadline to synchronize web content with browser UI. + EmbedSurface( + new_local_surface_id, new_dip_size, FirstFrameDeadlinePolicy()); +#else // Use the default deadline to synchronize web content with browser UI. // TODO(fsamuel): Investigate if there is a better deadline to use here. 
EmbedSurface(new_local_surface_id, new_dip_size, cc::DeadlinePolicy::UseDefaultDeadline()); +#endif // Remove stale content that might be displayed. if (stale_content_layer_->has_external_content()) { -- Gitee From 5b93a584507decb793d22aa4597fcb3de7537cc2 Mon Sep 17 00:00:00 2001 From: v00863305 Date: Tue, 6 Feb 2024 16:52:30 +0300 Subject: [PATCH 3/5] Allow UI compositor root surface eviction Allow evicting the root surface (UI compositor) by submitting an empty local compositor frame to unref the resources associated with the previously presented aggregated frame from the viz compositor (including web contents resources from referenced surfaces of embedded renderers which are marked for destruction). The "preserve-output-content" option allows preserving the latest drawn content on the output surface when the view is hidden. On the Chromium level (enabled by default): --enable-features=EvictRootSurface:preserve-output-content/true On the system level (enabled by default): persist.web.root_surface_eviction.enable true Related upstream issue: https://issues.chromium.org/issues/40273186 Signed-off-by: Volykhin Andrei Change-Id: Idf5d4d25b86788f5600e9ff2c5afa0bcfa245d87 --- components/viz/common/features.cc | 7 ++ components/viz/common/features.h | 3 + .../common/quads/compositor_frame_metadata.h | 12 +- .../viz/service/display/direct_renderer.cc | 6 + .../viz/service/display/direct_renderer.h | 8 ++ components/viz/service/display/display.cc | 11 ++ components/viz/service/display/display.h | 13 +++ .../viz/service/display/display_unittest.cc | 3 +- .../viz/service/display/null_renderer.h | 3 + .../viz/service/display/skia_renderer.cc | 30 +++++ .../viz/service/display/skia_renderer.h | 3 + .../viz/service/display/software_renderer.cc | 6 + .../viz/service/display/software_renderer.h | 3 + .../compositor_frame_sink_support.cc | 58 ++++++++-- .../compositor_frame_sink_support.h | 25 +++- .../compositor_frame_sink_support_unittest.cc | 9 +- .../frame_sinks/frame_sink_manager_impl.cc | 18 ++- .../frame_sink_manager_unittest.cc | 6 + .../root_compositor_frame_sink_impl.cc | 108 +++++++++++++++++- .../root_compositor_frame_sink_impl.h | 8 +- components/viz/service/surfaces/surface.cc | 5 +- .../viz/service/surfaces/surface_unittest.cc | 4 +- .../viz/test/fake_host_frame_sink_client.h | 3 +- .../renderer_host/delegated_frame_host.cc | 20 ++++ .../renderer_host/delegated_frame_host.h | 4 + 25 files changed, 336 insertions(+), 40 deletions(-) diff --git a/components/viz/common/features.cc b/components/viz/common/features.cc index 8b1908aab7..740acd9e25 100644 --- a/components/viz/common/features.cc +++ b/components/viz/common/features.cc @@ -276,6 +276,13 @@ BASE_FEATURE(kFirstFrameActivationDeadline, base::FEATURE_ENABLED_BY_DEFAULT); const base::FeatureParam kFirstFrameActivationDeadlineTimeout{ &kFirstFrameActivationDeadline, "timeout", base::Seconds(5)}; + +// If enabled, should include the root surface for eviction.
+BASE_FEATURE(kEvictRootSurface, + "EvictRootSurface", + base::FEATURE_ENABLED_BY_DEFAULT); +const base::FeatureParam kPreserveOutputContentOnEvictRootSurface{ + &kEvictRootSurface, "preserve-output-content", true}; #endif bool IsDelegatedCompositingEnabled() { diff --git a/components/viz/common/features.h b/components/viz/common/features.h index 15b6f52c1a..33d3749218 100644 --- a/components/viz/common/features.h +++ b/components/viz/common/features.h @@ -76,6 +76,9 @@ VIZ_COMMON_EXPORT extern const base::FeatureParam VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kFirstFrameActivationDeadline); VIZ_COMMON_EXPORT extern const base::FeatureParam kFirstFrameActivationDeadlineTimeout; +VIZ_COMMON_EXPORT BASE_DECLARE_FEATURE(kEvictRootSurface); +VIZ_COMMON_EXPORT extern const base::FeatureParam + kPreserveOutputContentOnEvictRootSurface; #endif VIZ_COMMON_EXPORT extern const char kDraw1Point12Ms[]; diff --git a/components/viz/common/quads/compositor_frame_metadata.h b/components/viz/common/quads/compositor_frame_metadata.h index 98dc3d2da9..a81bfdb936 100644 --- a/components/viz/common/quads/compositor_frame_metadata.h +++ b/components/viz/common/quads/compositor_frame_metadata.h @@ -35,6 +35,11 @@ namespace viz { +// A frame token value of 0 indicates an invalid or local frame token. A +// local frame token is used inside viz when it creates its own CompositorFrame +// for a surface. +inline constexpr uint32_t kInvalidOrLocalFrameToken = 0; + // Compares two frame tokens, handling cases where the token wraps around the // 32-bit max value. inline bool FrameTokenGT(uint32_t token1, uint32_t token2) { @@ -46,15 +51,16 @@ inline bool FrameTokenGT(uint32_t token1, uint32_t token2) { class VIZ_COMMON_EXPORT FrameTokenGenerator { public: inline uint32_t operator++() { - if (++frame_token_ == 0) + if (++frame_token_ == kInvalidOrLocalFrameToken) { ++frame_token_; + } return frame_token_; } inline uint32_t operator*() const { return frame_token_; } private: - uint32_t frame_token_ = 0; + uint32_t frame_token_ = kInvalidOrLocalFrameToken; }; class VIZ_COMMON_EXPORT CompositorFrameMetadata { @@ -143,7 +149,7 @@ class VIZ_COMMON_EXPORT CompositorFrameMetadata { // after the 32-bit max value. // TODO(crbug.com/850386): A custom type would be better to avoid incorrect // comparisons. - uint32_t frame_token = 0; + uint32_t frame_token = kInvalidOrLocalFrameToken; // Once the display compositor processes a frame with // |send_frame_token_to_embedder| flag turned on, the |frame_token| for the diff --git a/components/viz/service/display/direct_renderer.cc b/components/viz/service/display/direct_renderer.cc index 9f6a5f6eda..3869ec1530 100644 --- a/components/viz/service/display/direct_renderer.cc +++ b/components/viz/service/display/direct_renderer.cc @@ -1171,4 +1171,10 @@ gpu::Mailbox DirectRenderer::GetPrimaryPlaneOverlayTestingMailbox() { return gpu::Mailbox(); } +#if BUILDFLAG(IS_OHOS) +void DirectRenderer::EvictRenderPassAllocations() { + RemoveRenderPassResources(); +} +#endif + } // namespace viz diff --git a/components/viz/service/display/direct_renderer.h b/components/viz/service/display/direct_renderer.h index 93ea1359d1..5305ad0dfd 100644 --- a/components/viz/service/display/direct_renderer.h +++ b/components/viz/service/display/direct_renderer.h @@ -199,6 +199,11 @@ class VIZ_SERVICE_EXPORT DirectRenderer { // Return the bounding rect of previously drawn delegated ink trail. gfx::Rect GetDelegatedInkTrailDamageRect(); +#if BUILDFLAG(IS_OHOS) + // Evicts allocated resources for render passes. 
+ void EvictRenderPassAllocations(); +#endif + protected: friend class BspWalkActionDrawPolygon; friend class SkiaDelegatedInkRendererTest; @@ -279,6 +284,9 @@ class VIZ_SERVICE_EXPORT DirectRenderer { virtual void AllocateRenderPassResourceIfNeeded( const AggregatedRenderPassId& render_pass_id, const RenderPassRequirements& requirements) = 0; +#if BUILDFLAG(IS_OHOS) + virtual void RemoveRenderPassResources() = 0; +#endif virtual bool IsRenderPassResourceAllocated( const AggregatedRenderPassId& render_pass_id) const = 0; virtual gfx::Size GetRenderPassBackingPixelSize( diff --git a/components/viz/service/display/display.cc b/components/viz/service/display/display.cc index 288c27e59d..eacf72a518 100644 --- a/components/viz/service/display/display.cc +++ b/components/viz/service/display/display.cc @@ -960,6 +960,10 @@ bool Display::DrawAndSwap(const DrawAndSwapParams& params) { bool should_draw = have_copy_requests || (have_damage && size_matches); client_->DisplayWillDrawAndSwap(should_draw, &frame.render_pass_list); +#if BUILDFLAG(IS_OHOS) + should_draw &= !skip_draw_and_evict_resources_on_current_frame_; +#endif + absl::optional draw_timer; if (should_draw) { TRACE_EVENT_ASYNC_STEP_INTO0("viz,benchmark", @@ -1009,6 +1013,13 @@ bool Display::DrawAndSwap(const DrawAndSwapParams& params) { std::move(frame.surface_damage_rect_list_)); } else { TRACE_EVENT_INSTANT0("viz", "Draw skipped.", TRACE_EVENT_SCOPE_THREAD); + +#if BUILDFLAG(IS_OHOS) + if (skip_draw_and_evict_resources_on_current_frame_) { + skip_draw_and_evict_resources_on_current_frame_ = false; + renderer_->EvictRenderPassAllocations(); + } +#endif } bool should_swap = !disable_swap_until_resize_ && should_draw && size_matches; diff --git a/components/viz/service/display/display.h b/components/viz/service/display/display.h index 0b39f0e762..986c3bfd5b 100644 --- a/components/viz/service/display/display.h +++ b/components/viz/service/display/display.h @@ -147,6 +147,13 @@ class VIZ_SERVICE_EXPORT Display : public DisplaySchedulerClient, // may be run immediately. void DisableSwapUntilResize(base::OnceClosure no_pending_swaps_callback); +#if BUILDFLAG(IS_OHOS) + // Stop drawing the current frame and evict the allocated resources. + void SetSkipDrawAndEvictResourcesOnCurrentFrame() { + skip_draw_and_evict_resources_on_current_frame_ = true; + } +#endif + #if defined(OHOS_COMPOSITE_RENDER) void SetShouldFrameSubmissionBeforeDraw(bool should); void SetDrawRect(const gfx::Rect& new_rect); @@ -196,6 +203,8 @@ class VIZ_SERVICE_EXPORT Display : public DisplaySchedulerClient, mojom::CompositorFrameSinkType* type) override; bool has_scheduler() const { return !!scheduler_; } + bool visible() const { return visible_; } + const RendererSettings& settings() const { return settings_; } DirectRenderer* renderer_for_testing() const { return renderer_.get(); } bool resize_based_on_root_surface() const { @@ -340,6 +349,10 @@ class VIZ_SERVICE_EXPORT Display : public DisplaySchedulerClient, bool disable_swap_until_resize_ = true; +#if BUILDFLAG(IS_OHOS) + bool skip_draw_and_evict_resources_on_current_frame_ = false; +#endif + // Callback that will be run after all pending swaps have acked. 
base::OnceClosure no_pending_swaps_callback_; diff --git a/components/viz/service/display/display_unittest.cc b/components/viz/service/display/display_unittest.cc index 70cb6c821f..2eb5999226 100644 --- a/components/viz/service/display/display_unittest.cc +++ b/components/viz/service/display/display_unittest.cc @@ -3432,7 +3432,8 @@ TEST_P(OnBeginFrameAcksDisplayTest, CompositorFrameWithPresentationToken) { display_->Resize(display_size); const gfx::Size sub_surface_size(32, 32); - uint32_t frame_token_1 = 0, frame_token_2 = 0; + uint32_t frame_token_1 = kInvalidOrLocalFrameToken; + uint32_t frame_token_2 = kInvalidOrLocalFrameToken; { CompositorFrame frame = CompositorFrameBuilder() diff --git a/components/viz/service/display/null_renderer.h b/components/viz/service/display/null_renderer.h index faa9c2d9a9..b57ad3560d 100644 --- a/components/viz/service/display/null_renderer.h +++ b/components/viz/service/display/null_renderer.h @@ -32,6 +32,9 @@ class VIZ_SERVICE_EXPORT NullRenderer : public DirectRenderer { void AllocateRenderPassResourceIfNeeded( const AggregatedRenderPassId& render_pass_id, const RenderPassRequirements& requirements) override {} +#if BUILDFLAG(IS_OHOS) + void RemoveRenderPassResources() override {} +#endif bool IsRenderPassResourceAllocated( const AggregatedRenderPassId& render_pass_id) const override; gfx::Size GetRenderPassBackingPixelSize( diff --git a/components/viz/service/display/skia_renderer.cc b/components/viz/service/display/skia_renderer.cc index 933e26ac7d..b8293061be 100644 --- a/components/viz/service/display/skia_renderer.cc +++ b/components/viz/service/display/skia_renderer.cc @@ -3295,6 +3295,36 @@ void SkiaRenderer::AllocateRenderPassResourceIfNeeded( requirements.scanout_dcomp_surface})); } +#if BUILDFLAG(IS_OHOS) +void SkiaRenderer::RemoveRenderPassResources() { + std::vector passes_to_delete; + for (const auto& [backing_id, backing] : render_pass_backings_) { + // Buffer queue's root manages the root pass backing and its bookkeeping + // separately from other render pass backings. + if (!(buffer_queue_ && backing.is_root)) { + passes_to_delete.push_back(backing_id); + } + } + + // Delete RenderPass backings that will not be used. + for (size_t i = 0; i < passes_to_delete.size(); ++i) { + auto it = render_pass_backings_.find(passes_to_delete[i]); + auto& backing = it->second; + // Root render pass backings managed by |buffer_queue_| are not managed by + // DisplayResourceProvider, so we should not destroy them here. 
This + // reallocation is done in Reshape before drawing the frame + if (!(buffer_queue_ && backing.is_root)) { + skia_output_surface_->DestroySharedImage(backing.mailbox); + } + render_pass_backings_.erase(it); + } + + if (!passes_to_delete.empty()) { + skia_output_surface_->RemoveRenderPassResource(std::move(passes_to_delete)); + } +} +#endif + void SkiaRenderer::FlushOutputSurface() { auto sync_token = skia_output_surface_->Flush(); lock_set_for_external_use_->UnlockResources(sync_token); diff --git a/components/viz/service/display/skia_renderer.h b/components/viz/service/display/skia_renderer.h index 03bd3b7ffd..e7579b8f87 100644 --- a/components/viz/service/display/skia_renderer.h +++ b/components/viz/service/display/skia_renderer.h @@ -86,6 +86,9 @@ class VIZ_SERVICE_EXPORT SkiaRenderer : public DirectRenderer { void AllocateRenderPassResourceIfNeeded( const AggregatedRenderPassId& render_pass_id, const RenderPassRequirements& requirements) override; +#if BUILDFLAG(IS_OHOS) + void RemoveRenderPassResources() override; +#endif bool IsRenderPassResourceAllocated( const AggregatedRenderPassId& render_pass_id) const override; gfx::Size GetRenderPassBackingPixelSize( diff --git a/components/viz/service/display/software_renderer.cc b/components/viz/service/display/software_renderer.cc index f084ef7865..697e8e8c11 100644 --- a/components/viz/service/display/software_renderer.cc +++ b/components/viz/service/display/software_renderer.cc @@ -976,6 +976,12 @@ void SoftwareRenderer::AllocateRenderPassResourceIfNeeded( render_pass_bitmaps_.emplace(render_pass_id, std::move(bitmap)); } +#if BUILDFLAG(IS_OHOS) +void SoftwareRenderer::RemoveRenderPassResources() { + render_pass_bitmaps_.clear(); +} +#endif + bool SoftwareRenderer::IsRenderPassResourceAllocated( const AggregatedRenderPassId& render_pass_id) const { auto it = render_pass_bitmaps_.find(render_pass_id); diff --git a/components/viz/service/display/software_renderer.h b/components/viz/service/display/software_renderer.h index 94bc3b7a2a..59efca387f 100644 --- a/components/viz/service/display/software_renderer.h +++ b/components/viz/service/display/software_renderer.h @@ -53,6 +53,9 @@ class VIZ_SERVICE_EXPORT SoftwareRenderer : public DirectRenderer { void AllocateRenderPassResourceIfNeeded( const AggregatedRenderPassId& render_pass_id, const RenderPassRequirements& requirements) override; +#if BUILDFLAG(IS_OHOS) + void RemoveRenderPassResources() override; +#endif bool IsRenderPassResourceAllocated( const AggregatedRenderPassId& render_pass_id) const override; gfx::Size GetRenderPassBackingPixelSize( diff --git a/components/viz/service/frame_sinks/compositor_frame_sink_support.cc b/components/viz/service/frame_sinks/compositor_frame_sink_support.cc index 7f0ec5fc3c..0b709054ea 100644 --- a/components/viz/service/frame_sinks/compositor_frame_sink_support.cc +++ b/components/viz/service/frame_sinks/compositor_frame_sink_support.cc @@ -368,16 +368,16 @@ void CompositorFrameSinkSupport::ReturnResources( return; // When features::OnBeginFrameAcks is disabled we attempt to return resources - // in DidReceiveCompositorFrameAck. However if there is no - // `ack_pending_from_surface_count_` then we don't expect that signal soon. In - // which case we return the resources to the `client_` now. + // in DidReceiveCompositorFrameAck. However if there are no pending frames + // then we don't expect that signal soon. In which case we return the + // resources to the `client_` now. 
// // When features::OnBeginFrameAcks is enabled we attempt to return resources // during the next OnBeginFrame. However if we currently do not // `needs_begin_frame_` or if we have been disconnected from a // `begin_frame_source_` then we don't expect that signal soon. In which case // we return the resources to the `client_` now. - if (!ack_pending_from_surface_count_ && client_ && + if (pending_frames_.empty() && client_ && (!ShouldMergeBeginFrameWithAcks() || (!needs_begin_frame_ || !begin_frame_source_))) { client_->ReclaimResources(std::move(resources)); @@ -555,6 +555,34 @@ void CompositorFrameSinkSupport::DidDeleteSharedBitmap( owned_bitmaps_.erase(id); } +void CompositorFrameSinkSupport::SubmitCompositorFrameLocally( + const SurfaceId& surface_id, + CompositorFrame frame, + const RendererSettings& settings) { + CHECK_EQ(surface_id, last_created_surface_id_); + + pending_frames_.push_back(FrameData{.local_frame = true}); + Surface* surface = surface_manager_->GetSurfaceForId(surface_id); + + auto frame_rejected_callback = + base::ScopedClosureRunner(base::BindOnce([] { NOTREACHED(); })); + auto frame_index = ++last_frame_index_; + Surface::QueueFrameResult result = surface->QueueFrame( + std::move(frame), frame_index, std::move(frame_rejected_callback)); + // Currently, frames are only queued on Android, and we don't need to use + // `SubmitCompositorFrameLocally` for evicting resources on Android. + CHECK_EQ(result, Surface::QueueFrameResult::ACCEPTED_ACTIVE); + + // Make sure this surface will be stretched to match the display size. If + // `auto_resize_output_surface` is false, then swap will not occur meaning + // that the content of this compositor frame will not be presented. If it is + // not, then we won't properly push out existing resources. A mismatch between + // root surface size and display size can happen. For example, there is a race + // condition if `Display` is resized after it is set not visible but before + // any compositor frame with that new size is submitted. + CHECK(settings.auto_resize_output_surface); +} + SubmitResult CompositorFrameSinkSupport::MaybeSubmitCompositorFrame( const LocalSurfaceId& local_surface_id, CompositorFrame frame, @@ -588,7 +616,7 @@ SubmitResult CompositorFrameSinkSupport::MaybeSubmitCompositorFrame( #endif begin_frame_tracker_.ReceivedAck(frame.metadata.begin_frame_ack); - ++ack_pending_from_surface_count_; + pending_frames_.push_back(FrameData{.local_frame = false}); if (frame.metadata.begin_frame_ack.frame_id.source_id == BeginFrameArgs::kManualSourceId) { @@ -778,16 +806,26 @@ void CompositorFrameSinkSupport::HandleCallback() { } void CompositorFrameSinkSupport::DidReceiveCompositorFrameAck() { - DCHECK_GT(ack_pending_from_surface_count_, 0); + DCHECK(!pending_frames_.empty()); bool was_pending_manual_begin_frame_source_ = pending_manual_begin_frame_source_; - ack_pending_from_surface_count_--; - if (!ack_pending_from_surface_count_) { + bool was_local_frame = pending_frames_.front().local_frame; + pending_frames_.pop_front(); + + if (pending_frames_.empty()) { pending_manual_begin_frame_source_ = false; } if (!client_) return; + // If this frame came from viz directly and not from the client, don't send + // the client an ack, since it didn't do anything. Just return the resources. + if (was_local_frame) { + client_->ReclaimResources(std::move(surface_returned_resources_)); + surface_returned_resources_.clear(); + return; + } + // If we have a callback, we only return the resource on onBeginFrame. 
if (compositor_frame_callback_) { callback_received_receive_ack_ = true; @@ -822,7 +860,7 @@ void CompositorFrameSinkSupport::DidPresentCompositorFrame( base::TimeTicks draw_start_timestamp, const gfx::SwapTimings& swap_timings, const gfx::PresentationFeedback& feedback) { - DCHECK(frame_token); + CHECK_NE(frame_token, kInvalidOrLocalFrameToken); DCHECK((feedback.flags & gfx::PresentationFeedback::kFailure) || (!draw_start_timestamp.is_null() && !swap_timings.is_null())); @@ -941,7 +979,7 @@ void CompositorFrameSinkSupport::OnBeginFrame(const BeginFrameArgs& args) { if (ShouldMergeBeginFrameWithAcks()) { bool frame_ack = ack_queued_for_client_count_ > 0; ack_pending_during_on_begin_frame_ = - !frame_ack && ack_pending_from_surface_count_; + !frame_ack && !pending_frames_.empty(); client_->OnBeginFrame(adjusted_args, std::move(frame_timing_details_), frame_ack, std::move(surface_returned_resources_)); if (frame_ack) { diff --git a/components/viz/service/frame_sinks/compositor_frame_sink_support.h b/components/viz/service/frame_sinks/compositor_frame_sink_support.h index e4702891d1..14d6d06f54 100644 --- a/components/viz/service/frame_sinks/compositor_frame_sink_support.h +++ b/components/viz/service/frame_sinks/compositor_frame_sink_support.h @@ -9,6 +9,7 @@ #include #include +#include "base/containers/circular_deque.h" #include "base/containers/flat_set.h" #include "base/containers/queue.h" #include "base/functional/callback.h" @@ -193,6 +194,15 @@ class VIZ_SERVICE_EXPORT CompositorFrameSinkSupport // doesn't have to exist at the time of calling. void EvictSurface(const LocalSurfaceId& id); + void GarbageCollectSurfaces() { surface_manager_->GarbageCollectSurfaces(); } + + // Submits a compositor frame not from the client but from viz itself. For + // example, this is used to submit empty compositor frames to unref + // resources on root surface eviction. + void SubmitCompositorFrameLocally(const SurfaceId& surface_id, + CompositorFrame frame, + const RendererSettings& settings); + // Attempts to submit a new CompositorFrame to |local_surface_id| and returns // whether the frame was accepted or the reason why it was rejected. If // |local_surface_id| hasn't been submitted before then a new Surface will be @@ -350,9 +360,17 @@ class VIZ_SERVICE_EXPORT CompositorFrameSinkSupport // This has a HitTestAggregator if and only if |is_root_| is true. std::unique_ptr hit_test_aggregator_; - // Counts the number of CompositorFrames that have been submitted and have not + struct FrameData { + // True if this frame was submitted from viz itself. This happens during + // root surface eviction when an empty compositor frame is submitted to + // deref existing resources. + bool local_frame; + }; + + // Keeps track of CompositorFrames that have been submitted and have not // yet received an ACK from their Surface. - int ack_pending_from_surface_count_ = 0; + base::circular_deque pending_frames_; + // Counts the number of ACKs that have been received from a Surface and have // not yet been sent to the CompositorFrameSinkClient. int ack_queued_for_client_count_ = 0; @@ -360,7 +378,8 @@ class VIZ_SERVICE_EXPORT CompositorFrameSinkSupport // When `true` we have received frames from a client using its own // BeginFrameSource. While dealing with frames from multiple sources we cannot - // rely on `ack_pending_from_surface_count_` to throttle frame production. + // rely on checking the number of pending frames in `pending_frames_` to + // throttle frame production. 
// // TODO(crbug.com/1396081): Track acks, presentation feedback, and resources // being returned, on a per BeginFrameSource basis. For diff --git a/components/viz/service/frame_sinks/compositor_frame_sink_support_unittest.cc b/components/viz/service/frame_sinks/compositor_frame_sink_support_unittest.cc index 1707b7f853..4a32b1742b 100644 --- a/components/viz/service/frame_sinks/compositor_frame_sink_support_unittest.cc +++ b/components/viz/service/frame_sinks/compositor_frame_sink_support_unittest.cc @@ -324,9 +324,8 @@ class OnBeginFrameAcksCompositorFrameSinkSupportTest bool BeginFrameAcksEnabled() const { return GetParam(); } - int ack_pending_from_surface_count( - const CompositorFrameSinkSupport* support) const { - return support->ack_pending_from_surface_count_; + int num_pending_frames(const CompositorFrameSinkSupport* support) const { + return support->pending_frames_.size(); } private: @@ -598,7 +597,7 @@ TEST_P(OnBeginFrameAcksCompositorFrameSinkSupportTest, ResourceLifetime) { // This test relied on CompositorFrameSinkSupport::ReturnResources to not send // as long as there has been no DidReceiveCompositorFrameAck. Such that - // `ack_pending_from_surface_count_` is always greater than 1. + // the number of pending frames is always greater than 1. // // With features::kOnBeginFrameAcks we now return the resources during // OnBeginFrame, however that is throttled while we await any ack. @@ -745,7 +744,7 @@ TEST_P(OnBeginFrameAcksCompositorFrameSinkSupportTest, AddDuringEviction) { testing::Mock::VerifyAndClearExpectations(&mock_client); } - EXPECT_EQ(1, ack_pending_from_surface_count(support.get())); + EXPECT_EQ(1, num_pending_frames(support.get())); } // Verifies that only monotonically increasing LocalSurfaceIds are accepted. diff --git a/components/viz/service/frame_sinks/frame_sink_manager_impl.cc b/components/viz/service/frame_sinks/frame_sink_manager_impl.cc index 3ce6bb505a..4ffea4789e 100644 --- a/components/viz/service/frame_sinks/frame_sink_manager_impl.cc +++ b/components/viz/service/frame_sinks/frame_sink_manager_impl.cc @@ -353,12 +353,18 @@ void FrameSinkManagerImpl::EvictSurfaces( auto it = support_map_.find(surface_id.frame_sink_id()); if (it == support_map_.end()) continue; - it->second->EvictSurface(surface_id.local_surface_id()); - if (!it->second->is_root()) - continue; - auto root_it = root_sink_map_.find(surface_id.frame_sink_id()); - if (root_it != root_sink_map_.end()) - root_it->second->DidEvictSurface(surface_id); + + bool should_evict = true; + if (it->second->is_root()) { + auto root_it = root_sink_map_.find(surface_id.frame_sink_id()); + if (root_it != root_sink_map_.end()) { + should_evict = root_it->second->WillEvictSurface(surface_id); + } + } + + if (should_evict) { + it->second->EvictSurface(surface_id.local_surface_id()); + } } // Trigger garbage collection immediately, otherwise the surface may not be diff --git a/components/viz/service/frame_sinks/frame_sink_manager_unittest.cc b/components/viz/service/frame_sinks/frame_sink_manager_unittest.cc index bbce18b254..1ae72a1265 100644 --- a/components/viz/service/frame_sinks/frame_sink_manager_unittest.cc +++ b/components/viz/service/frame_sinks/frame_sink_manager_unittest.cc @@ -670,6 +670,8 @@ TEST_F(FrameSinkManagerTest, EvictRootSurfaceId) { manager_.CreateRootCompositorFrameSink( root_data.BuildParams(kFrameSinkIdRoot)); + GetRootCompositorFrameSinkImpl()->Resize(gfx::Size(20, 20)); + ParentLocalSurfaceIdAllocator allocator; allocator.GenerateId(); const LocalSurfaceId local_surface_id = 
allocator.GetCurrentLocalSurfaceId(); @@ -690,6 +692,8 @@ TEST_F(FrameSinkManagerTest, EvictNewerRootSurfaceId) { manager_.CreateRootCompositorFrameSink( root_data.BuildParams(kFrameSinkIdRoot)); + GetRootCompositorFrameSinkImpl()->Resize(gfx::Size(20, 20)); + ParentLocalSurfaceIdAllocator allocator; allocator.GenerateId(); const LocalSurfaceId local_surface_id = allocator.GetCurrentLocalSurfaceId(); @@ -713,6 +717,8 @@ TEST_F(FrameSinkManagerTest, SubmitCompositorFrameWithEvictedSurfaceId) { manager_.CreateRootCompositorFrameSink( root_data.BuildParams(kFrameSinkIdRoot)); + GetRootCompositorFrameSinkImpl()->Resize(gfx::Size(20, 20)); + ParentLocalSurfaceIdAllocator allocator; allocator.GenerateId(); const LocalSurfaceId local_surface_id = allocator.GetCurrentLocalSurfaceId(); diff --git a/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc b/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc index 40c6f57fb3..75e6a0eefd 100644 --- a/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc +++ b/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc @@ -16,7 +16,9 @@ #include "base/time/time.h" #include "build/build_config.h" #include "build/chromeos_buildflags.h" +#include "components/viz/common/features.h" #include "components/viz/common/frame_sinks/begin_frame_source.h" +#include "components/viz/common/quads/solid_color_draw_quad.h" #include "components/viz/service/display/display.h" #include "components/viz/service/display/output_surface.h" #include "components/viz/service/display_embedder/output_surface_provider.h" @@ -241,16 +243,82 @@ RootCompositorFrameSinkImpl::~RootCompositorFrameSinkImpl() { begin_frame_source()); } -void RootCompositorFrameSinkImpl::DidEvictSurface(const SurfaceId& surface_id) { +bool RootCompositorFrameSinkImpl::WillEvictSurface( + const SurfaceId& surface_id) { const SurfaceId& current_surface_id = display_->CurrentSurfaceId(); if (!current_surface_id.is_valid()) - return; - DCHECK_EQ(surface_id.frame_sink_id(), surface_id.frame_sink_id()); - // This matches CompositorFrameSinkSupport's eviction logic. + return true; // Okay to evict immediately. + DCHECK_EQ(surface_id.frame_sink_id(), current_surface_id.frame_sink_id()); + CHECK(!display_->visible()); + DCHECK(display_->has_scheduler()); + + // This matches CompositorFrameSinkSupport's eviction logic, which wil evict + // `surface_id` or matching but older ones. Avoid overwriting the contents + // of `current_surface_id` if it's newer here by doing the same check. if (surface_id.local_surface_id().parent_sequence_number() >= current_surface_id.local_surface_id().parent_sequence_number()) { - display_->InvalidateCurrentSurfaceId(); + // Push empty compositor frame to root surface. This is so the resources + // can be unreffed from both viz and the OS compositor (if required). + CompositorFrame frame; + + auto& metadata = frame.metadata; + metadata.frame_token = kInvalidOrLocalFrameToken; + + // The given `surface_id` may be newer than `current_surface_id`, so use the + // one we actually have. 
+ auto* surface = + support_->frame_sink_manager()->surface_manager()->GetSurfaceForId( + current_surface_id); + CHECK(surface); + metadata.device_scale_factor = surface->device_scale_factor(); + frame.metadata.begin_frame_ack = BeginFrameAck::CreateManualAckWithDamage(); + + frame.render_pass_list.push_back(CompositorRenderPass::Create()); + const std::unique_ptr& render_pass = + frame.render_pass_list.back(); + + const CompositorRenderPassId kRenderPassId{1}; + auto surface_rect = gfx::Rect(surface->size_in_pixels()); + DCHECK(!surface_rect.IsEmpty()); + render_pass->SetNew(kRenderPassId, /*output_rect=*/surface_rect, + /*damage_rect=*/surface_rect, gfx::Transform()); +#if BUILDFLAG(IS_OHOS) + render_pass->has_transparent_background = false; +#endif + + SharedQuadState* quad_state = render_pass->CreateAndAppendSharedQuadState(); + + quad_state->SetAll(gfx::Transform(), /*layer_rect=*/surface_rect, + /*visible_layer_rect=*/surface_rect, + /*filter_info=*/gfx::MaskFilterInfo(), + /*clip=*/absl::nullopt, + /*contents_opaque=*/true, /*opacity_f=*/1.f, + /*blend=*/SkBlendMode::kSrcOver, /*sorting_context=*/0); + + SolidColorDrawQuad* solid_quad = + render_pass->CreateAndAppendDrawQuad(); +#if BUILDFLAG(IS_OHOS) + solid_quad->SetNew(quad_state, surface_rect, surface_rect, SkColors::kWhite, + /*anti_aliasing_off=*/false); +#else + solid_quad->SetNew(quad_state, surface_rect, surface_rect, SkColors::kBlack, + /*anti_aliasing_off=*/false); +#endif + + support_->SubmitCompositorFrameLocally(current_surface_id, std::move(frame), + display_->settings()); + + // Complete the eviction on next draw and swap. + to_evict_on_next_draw_and_swap_ = surface_id.local_surface_id(); + display_->SetVisible(true); + display_->ForceImmediateDrawAndSwapIfPossible(); + // Don't evict immediately. + // Delay eviction until the next draw to make sure that the draw is + // successful (requires the surface not to be evicted). We need the draw (of + // an empty CF) to be successful to push out and free resources. + return false; } + return true; // Okay to evict immediately. 
} const SurfaceId& RootCompositorFrameSinkImpl::CurrentSurfaceId() const { @@ -258,6 +326,9 @@ const SurfaceId& RootCompositorFrameSinkImpl::CurrentSurfaceId() const { } void RootCompositorFrameSinkImpl::SetDisplayVisible(bool visible) { + if (visible) { + to_evict_on_next_draw_and_swap_ = LocalSurfaceId(); + } display_->SetVisible(visible); } @@ -612,6 +683,15 @@ void RootCompositorFrameSinkImpl::DisplayWillDrawAndSwap( AggregatedRenderPassList* render_passes) { DCHECK(support_->GetHitTestAggregator()); support_->GetHitTestAggregator()->Aggregate(display_->CurrentSurfaceId()); + +#if BUILDFLAG(IS_OHOS) + if (to_evict_on_next_draw_and_swap_.is_valid()) { + if (base::FeatureList::IsEnabled(features::kEvictRootSurface) && + features::kPreserveOutputContentOnEvictRootSurface.Get()) { + display_->SetSkipDrawAndEvictResourcesOnCurrentFrame(); + } + } +#endif } base::ScopedClosureRunner RootCompositorFrameSinkImpl::GetCacheBackBufferCb() { @@ -740,7 +820,23 @@ RootCompositorFrameSinkImpl::GetPreferredFrameIntervalForFrameSinkId( ->GetPreferredFrameIntervalForFrameSinkId(id, type); } -void RootCompositorFrameSinkImpl::DisplayDidDrawAndSwap() {} +void RootCompositorFrameSinkImpl::DisplayDidDrawAndSwap() { + if (to_evict_on_next_draw_and_swap_.is_valid()) { + display_->SetVisible(false); + display_->InvalidateCurrentSurfaceId(); + + support_->EvictSurface(to_evict_on_next_draw_and_swap_); + + // Trigger garbage collection immediately, otherwise the surface may not be + // evicted for a long time (e.g. not before a frame is produced). + if (base::FeatureList::IsEnabled( + features::kEagerSurfaceGarbageCollection)) { + support_->GarbageCollectSurfaces(); + } + } + + to_evict_on_next_draw_and_swap_ = LocalSurfaceId(); +} BeginFrameSource* RootCompositorFrameSinkImpl::begin_frame_source() { if (external_begin_frame_source_) diff --git a/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h b/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h index 8357e95d89..2cc7e4bc75 100644 --- a/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h +++ b/components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h @@ -63,7 +63,8 @@ class VIZ_SERVICE_EXPORT RootCompositorFrameSinkImpl ~RootCompositorFrameSinkImpl() override; - void DidEvictSurface(const SurfaceId& surface_id); + // Returns true iff it is okay to evict the root surface immediately. + bool WillEvictSurface(const SurfaceId& surface_id); const SurfaceId& CurrentSurfaceId() const; @@ -229,6 +230,11 @@ void SetHandledTouchEvent(bool handledTouchEvent) override {} // TODO(http://crbug.com/1153404): Remove this field when experiment is over. bool apply_simple_frame_rate_throttling_ = false; + // If we evict the root surface, we want to push an empty compositor frame to + // it first to unref its resources. This requires a draw and swap to complete + // to actually unref. + LocalSurfaceId to_evict_on_next_draw_and_swap_ = LocalSurfaceId(); + // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch // of lacros-chrome is complete. 
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_OHOS) diff --git a/components/viz/service/surfaces/surface.cc b/components/viz/service/surfaces/surface.cc index e7282a7b0e..7472a2d7e8 100644 --- a/components/viz/service/surfaces/surface.cc +++ b/components/viz/service/surfaces/surface.cc @@ -70,7 +70,7 @@ void Surface::PresentationHelper::DidPresent( base::TimeTicks draw_start_timestamp, const gfx::SwapTimings& swap_timings, const gfx::PresentationFeedback& feedback) { - if (surface_client_ && frame_token_) { + if (surface_client_ && frame_token_ != kInvalidOrLocalFrameToken) { surface_client_->OnSurfacePresented(frame_token_, draw_start_timestamp, swap_timings, feedback); } @@ -839,7 +839,8 @@ void Surface::UnrefFrameResourcesAndRunCallbacks( // If we won't be getting a presented notification, we'll notify the client // when the frame is unref'd. - if (!frame_data->will_be_notified_of_presentation && surface_client_) { + if (!frame_data->will_be_notified_of_presentation && surface_client_ && + frame_data->frame.metadata.frame_token != kInvalidOrLocalFrameToken) { surface_client_->OnSurfacePresented(frame_data->frame.metadata.frame_token, base::TimeTicks(), gfx::SwapTimings(), gfx::PresentationFeedback::Failure()); diff --git a/components/viz/service/surfaces/surface_unittest.cc b/components/viz/service/surfaces/surface_unittest.cc index e8d7b4038c..9a2d51d049 100644 --- a/components/viz/service/surfaces/surface_unittest.cc +++ b/components/viz/service/surfaces/surface_unittest.cc @@ -79,7 +79,7 @@ TEST_P(OnBeginFrameAcksSurfaceTest, PresentationCallback) { if (BeginFrameAcksEnabled()) { support->SetWantsBeginFrameAcks(); } - uint32_t frame_token = 0; + uint32_t frame_token = kInvalidOrLocalFrameToken; { CompositorFrame frame = CompositorFrameBuilder() @@ -87,7 +87,7 @@ TEST_P(OnBeginFrameAcksSurfaceTest, PresentationCallback) { .SetBeginFrameSourceId(kBeginFrameSourceId) .Build(); frame_token = frame.metadata.frame_token; - ASSERT_NE(frame_token, 0u); + ASSERT_NE(frame_token, kInvalidOrLocalFrameToken); EXPECT_CALL(client, DidReceiveCompositorFrameAck(testing::_)) .Times(BeginFrameAcksEnabled() ? 
0 : 1); support->SubmitCompositorFrame(local_surface_id, std::move(frame)); diff --git a/components/viz/test/fake_host_frame_sink_client.h b/components/viz/test/fake_host_frame_sink_client.h index e4691fb50b..4ca2a27dae 100644 --- a/components/viz/test/fake_host_frame_sink_client.h +++ b/components/viz/test/fake_host_frame_sink_client.h @@ -6,6 +6,7 @@ #define COMPONENTS_VIZ_TEST_FAKE_HOST_FRAME_SINK_CLIENT_H_ #include "base/time/time.h" +#include "components/viz/common/quads/compositor_frame_metadata.h" #include "components/viz/common/surfaces/surface_info.h" #include "components/viz/host/host_frame_sink_client.h" @@ -28,7 +29,7 @@ class FakeHostFrameSinkClient : public HostFrameSinkClient { uint32_t last_frame_token_seen() const { return last_frame_token_seen_; } private: - uint32_t last_frame_token_seen_ = 0u; + uint32_t last_frame_token_seen_ = kInvalidOrLocalFrameToken; }; } // namespace viz diff --git a/content/browser/renderer_host/delegated_frame_host.cc b/content/browser/renderer_host/delegated_frame_host.cc index 7b5062a283..81a93d924c 100644 --- a/content/browser/renderer_host/delegated_frame_host.cc +++ b/content/browser/renderer_host/delegated_frame_host.cc @@ -31,6 +31,10 @@ #include "third_party/skia/include/core/SkColor.h" #include "ui/gfx/geometry/dip_util.h" +#if BUILDFLAG(IS_OHOS) +#include "third_party/ohos_ndk/includes/ohos_adapter/ohos_adapter_helper.h" +#endif + namespace content { namespace { @@ -630,4 +634,20 @@ void DelegatedFrameHost::TakeFallbackContentFrom(DelegatedFrameHost* other) { desired_fallback); } +#if BUILDFLAG(IS_OHOS) +// static +bool DelegatedFrameHost::ShouldIncludeUiCompositorForEviction() { + static bool persist_web_root_surface_eviction_enabled = + OHOS::NWeb::OhosAdapterHelper::GetInstance() + .GetSystemPropertiesInstance() + .GetBoolParameter("persist.web.root_surface_eviction.enable", + true); + if (!persist_web_root_surface_eviction_enabled) { + return false; + } + + return base::FeatureList::IsEnabled(features::kEvictRootSurface); +} +#endif + } // namespace content diff --git a/content/browser/renderer_host/delegated_frame_host.h b/content/browser/renderer_host/delegated_frame_host.h index dce7d340a5..a81ddfbc80 100644 --- a/content/browser/renderer_host/delegated_frame_host.h +++ b/content/browser/renderer_host/delegated_frame_host.h @@ -209,6 +209,10 @@ class CONTENT_EXPORT DelegatedFrameHost return frame_evictor_.get(); } +#if BUILDFLAG(IS_OHOS) + static bool ShouldIncludeUiCompositorForEviction(); +#endif + private: friend class DelegatedFrameHostClient; FRIEND_TEST_ALL_PREFIXES(RenderWidgetHostViewAuraBrowserTest, -- Gitee From e31aaf136223923247ef7f260d5966dc42423050 Mon Sep 17 00:00:00 2001 From: v00863305 Date: Wed, 10 Apr 2024 08:23:21 +0300 Subject: [PATCH 4/5] Shared image pool A global shared image backing pool that manages cacheable GPU resources and allows reusing them to reduce peak GPU memory allocation during tab opening or switching (background to foreground and vice versa). On the Chromium level (enabled by default for the browser on Tablet/PC): --enable-features=SharedImagePool (max-count/memory-limit/expiration-delay -> 50/30MB/30secs) On the system level (enabled by default): persist.web.shared_image_pool.enable true Also, more carefully aggregate new "enable/disable" features into a single group. For instance: --enable-features=X --enable-features=Y Conceptually, this is intended to enable features X and Y. However, Chrome's command line parsing only applies the last seen switch, resulting in only feature Y being enabled.
To solve this, transform it to: --enable-features=X,Y Change-Id: I6e75587b7181fdf9fac7d780342999f5ce3f902c Signed-off-by: Volykhin Andrei --- gpu/command_buffer/service/BUILD.gn | 12 + .../service/shared_image/egl_image_backing.cc | 35 ++ .../service/shared_image/egl_image_backing.h | 15 + .../shared_image/egl_image_backing_factory.cc | 46 +++ .../shared_image/egl_image_backing_factory.h | 17 + .../shared_image/shared_image_backing.h | 6 + .../shared_image_backing_factory.h | 22 ++ .../shared_image/shared_image_factory.cc | 34 ++ .../shared_image/shared_image_manager.cc | 21 ++ .../shared_image/shared_image_manager.h | 11 + .../service/shared_image/shared_image_pool.cc | 327 ++++++++++++++++++ .../service/shared_image/shared_image_pool.h | 104 ++++++ ohos_nweb/src/nweb_impl.cc | 24 +- 13 files changed, 673 insertions(+), 1 deletion(-) create mode 100644 gpu/command_buffer/service/shared_image/shared_image_pool.cc create mode 100644 gpu/command_buffer/service/shared_image/shared_image_pool.h diff --git a/gpu/command_buffer/service/BUILD.gn b/gpu/command_buffer/service/BUILD.gn index 90d204c5bb..ee2e46ecc3 100644 --- a/gpu/command_buffer/service/BUILD.gn +++ b/gpu/command_buffer/service/BUILD.gn @@ -596,6 +596,18 @@ target(link_target_type, "gles2_sources") { } } + if (is_ohos) { + if (!is_debug) { + # On OpenHarmony optimize more since this component can be a bottleneck. + configs -= [ "//build/config/compiler:default_optimization" ] + configs += [ "//build/config/compiler:optimize_max" ] + } + sources += [ + "shared_image/shared_image_pool.cc", + "shared_image/shared_image_pool.h", + ] + } + if (is_win) { sources += [ "dxgi_shared_handle_manager.cc", diff --git a/gpu/command_buffer/service/shared_image/egl_image_backing.cc b/gpu/command_buffer/service/shared_image/egl_image_backing.cc index 3581075698..fe68d5d7d9 100644 --- a/gpu/command_buffer/service/shared_image/egl_image_backing.cc +++ b/gpu/command_buffer/service/shared_image/egl_image_backing.cc @@ -222,6 +222,34 @@ EGLImageBacking::~EGLImageBacking() { DCHECK(!source_texture_holder_); } +#if BUILDFLAG(IS_OHOS) +// static +std::unique_ptr EGLImageBacking::CreateFromBacking( + const Mailbox& mailbox, + std::unique_ptr backing, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + const GLCommonImageBackingFactory::FormatInfo& format_into, + const GpuDriverBugWorkarounds& workarounds, + bool use_passthrough) { + auto new_backing = std::make_unique( + mailbox, backing->format(), backing->size(), color_space, surface_origin, + alpha_type, backing->usage(), backing->GetEstimatedSize(), format_into, + workarounds, use_passthrough, base::span()); + + { + AutoLock auto_lock(backing.get()); + AutoLock auto_lock2(new_backing.get()); + + DCHECK(backing->egl_image_.is_valid()); + new_backing->egl_image_ = std::move(backing->egl_image_); + } + + return new_backing; +} +#endif + SharedImageBackingType EGLImageBacking::GetType() const { return SharedImageBackingType::kEGLImage; } @@ -527,4 +555,11 @@ void EGLImageBacking::MarkForDestruction() { source_texture_holder_.reset(); } +#if BUILDFLAG(IS_OHOS) +bool EGLImageBacking::IsReusable() const { + AutoLock auto_lock(this); + return have_context() && egl_image_.is_valid(); +} +#endif + } // namespace gpu diff --git a/gpu/command_buffer/service/shared_image/egl_image_backing.h b/gpu/command_buffer/service/shared_image/egl_image_backing.h index d2abc2c463..5a2a318718 100644 --- a/gpu/command_buffer/service/shared_image/egl_image_backing.h +++ 
b/gpu/command_buffer/service/shared_image/egl_image_backing.h @@ -48,10 +48,25 @@ class EGLImageBacking : public ClearTrackingSharedImageBacking { ~EGLImageBacking() override; +#if BUILDFLAG(IS_OHOS) + static std::unique_ptr CreateFromBacking( + const Mailbox& mailbox, + std::unique_ptr backing, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + const GLCommonImageBackingFactory::FormatInfo& format_into, + const GpuDriverBugWorkarounds& workarounds, + bool use_passthrough); +#endif + // SharedImageBacking implementation. SharedImageBackingType GetType() const override; void Update(std::unique_ptr in_fence) override; void MarkForDestruction() override; +#if BUILDFLAG(IS_OHOS) + bool IsReusable() const override; +#endif protected: std::unique_ptr ProduceGLTexture( diff --git a/gpu/command_buffer/service/shared_image/egl_image_backing_factory.cc b/gpu/command_buffer/service/shared_image/egl_image_backing_factory.cc index c8e9f01721..936769e076 100644 --- a/gpu/command_buffer/service/shared_image/egl_image_backing_factory.cc +++ b/gpu/command_buffer/service/shared_image/egl_image_backing_factory.cc @@ -146,6 +146,52 @@ bool EGLImageBackingFactory::IsSupported(uint32_t usage, return CanCreateTexture(format, size, pixel_data, GL_TEXTURE_2D); } +#if BUILDFLAG(IS_OHOS) +bool EGLImageBackingFactory::CanCreateFromSharedImage( + const std::unique_ptr& backing, + uint32_t usage, + viz::SharedImageFormat format, + const gfx::Size& size, + bool thread_safe, + base::span pixel_data) { + if (backing->GetType() != SharedImageBackingType::kEGLImage) { + return false; + } + + if (!pixel_data.empty()) { + return false; + } + + if (usage != backing->usage() || format != backing->format() || + size != backing->size() || thread_safe != backing->is_thread_safe()) { + return false; + } + + return backing->IsReusable(); +} + +std::unique_ptr +EGLImageBackingFactory::CreateFromSharedImage( + const Mailbox& mailbox, + std::unique_ptr backing, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type) { + CHECK(backing->GetType() == SharedImageBackingType::kEGLImage); + + // EGLImageBacking only supports single-planar textures (so far). 
+ auto format_info = GetFormatInfo(backing->format()); + DCHECK_EQ(format_info.size(), 1u); + + std::unique_ptr egl_backing( + static_cast(backing.release())); + + return EGLImageBacking::CreateFromBacking( + mailbox, std::move(egl_backing), color_space, surface_origin, alpha_type, + format_info[0], workarounds_, use_passthrough_); +} +#endif + std::unique_ptr EGLImageBackingFactory::MakeEglImageBacking( const Mailbox& mailbox, viz::SharedImageFormat format, diff --git a/gpu/command_buffer/service/shared_image/egl_image_backing_factory.h b/gpu/command_buffer/service/shared_image/egl_image_backing_factory.h index cd4e718a43..c02978113e 100644 --- a/gpu/command_buffer/service/shared_image/egl_image_backing_factory.h +++ b/gpu/command_buffer/service/shared_image/egl_image_backing_factory.h @@ -77,6 +77,23 @@ class GPU_GLES2_EXPORT EGLImageBackingFactory GrContextType gr_context_type, base::span pixel_data) override; +#if BUILDFLAG(IS_OHOS) + bool CanCreateFromSharedImage( + const std::unique_ptr& backing, + uint32_t usage, + viz::SharedImageFormat format, + const gfx::Size& size, + bool thread_safe, + base::span pixel_data) override; + + std::unique_ptr CreateFromSharedImage( + const Mailbox& mailbox, + std::unique_ptr backing, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type) override; +#endif + private: std::unique_ptr MakeEglImageBacking( const Mailbox& mailbox, diff --git a/gpu/command_buffer/service/shared_image/shared_image_backing.h b/gpu/command_buffer/service/shared_image/shared_image_backing.h index e9b6736f2f..ec06bb61b0 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_backing.h +++ b/gpu/command_buffer/service/shared_image/shared_image_backing.h @@ -174,6 +174,12 @@ class GPU_GLES2_EXPORT SharedImageBacking { virtual void SetPurgeable(bool purgeable) {} virtual bool IsPurgeable() const; +#if BUILDFLAG(IS_OHOS) + // Indicate that the image is reusable. When an image is reusable, its + // actual storage could be reuse by cache. + virtual bool IsReusable() const { return false; } +#endif + virtual void Update(std::unique_ptr in_fence); // Uploads pixels from memory into GPU texture. 
`pixmaps` should have one diff --git a/gpu/command_buffer/service/shared_image/shared_image_backing_factory.h b/gpu/command_buffer/service/shared_image/shared_image_backing_factory.h index 2dce6ac307..0afcc147f4 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_backing_factory.h +++ b/gpu/command_buffer/service/shared_image/shared_image_backing_factory.h @@ -94,6 +94,28 @@ class GPU_GLES2_EXPORT SharedImageBackingFactory { GrContextType gr_context_type, base::span pixel_data); +#if BUILDFLAG(IS_OHOS) + // Returns true if the factory is supported + virtual bool CanCreateFromSharedImage( + const std::unique_ptr& backing, + uint32_t usage, + viz::SharedImageFormat format, + const gfx::Size& size, + bool thread_safe, + base::span pixel_data) { + return false; + } + + virtual std::unique_ptr CreateFromSharedImage( + const Mailbox& mailbox, + std::unique_ptr backing, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type) { + return nullptr; + } +#endif + base::WeakPtr GetWeakPtr(); protected: diff --git a/gpu/command_buffer/service/shared_image/shared_image_factory.cc b/gpu/command_buffer/service/shared_image/shared_image_factory.cc index 67bf53168e..bb2525aeb7 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_factory.cc +++ b/gpu/command_buffer/service/shared_image/shared_image_factory.cc @@ -77,6 +77,10 @@ #include "ui/gl/gl_display.h" #endif // defined(USE_EGL) +#if BUILDFLAG(IS_OHOS) +#include "gpu/command_buffer/service/shared_image/shared_image_pool.h" +#endif + namespace gpu { namespace { @@ -314,9 +318,24 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox, return false; } +#if BUILDFLAG(IS_OHOS) + std::unique_ptr backing; + if (auto* shared_image_pool = shared_image_manager_->shared_image_pool()) { + backing = shared_image_pool->GetOrCreateSharedImage( + factory, mailbox, format, surface_handle, size, color_space, + surface_origin, alpha_type, usage, std::move(debug_label), + IsSharedBetweenThreads(usage), base::span()); + } else { + backing = factory->CreateSharedImage( + mailbox, format, surface_handle, size, color_space, surface_origin, + alpha_type, usage, std::move(debug_label), + IsSharedBetweenThreads(usage)); + } +#else auto backing = factory->CreateSharedImage( mailbox, format, surface_handle, size, color_space, surface_origin, alpha_type, usage, std::move(debug_label), IsSharedBetweenThreads(usage)); +#endif if (backing) { DVLOG(1) << "CreateSharedImage[" << backing->GetName() @@ -354,9 +373,24 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox, return false; } +#if BUILDFLAG(IS_OHOS) + std::unique_ptr backing; + if (auto* shared_image_pool = shared_image_manager_->shared_image_pool()) { + backing = shared_image_pool->GetOrCreateSharedImage( + factory, mailbox, format, kNullSurfaceHandle, size, color_space, + surface_origin, alpha_type, usage, std::move(debug_label), + IsSharedBetweenThreads(usage), data); + } else { + backing = factory->CreateSharedImage(mailbox, format, size, color_space, + surface_origin, alpha_type, usage, + std::move(debug_label), data); + } +#else auto backing = factory->CreateSharedImage(mailbox, format, size, color_space, surface_origin, alpha_type, usage, std::move(debug_label), data); +#endif + if (backing) { DVLOG(1) << "CreateSharedImagePixels[" << backing->GetName() << "] with pixels size=" << size.ToString() diff --git a/gpu/command_buffer/service/shared_image/shared_image_manager.cc 
b/gpu/command_buffer/service/shared_image/shared_image_manager.cc index 81cbf2af28..b8255939ab 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_manager.cc +++ b/gpu/command_buffer/service/shared_image/shared_image_manager.cc @@ -35,6 +35,10 @@ #include "base/android/android_hardware_buffer_compat.h" #endif +#if BUILDFLAG(IS_OHOS) +#include "gpu/command_buffer/service/shared_image/shared_image_pool.h" +#endif + #if DCHECK_IS_ON() #define CALLED_ON_VALID_THREAD() \ do { \ @@ -90,6 +94,9 @@ SharedImageManager::SharedImageManager(bool thread_safe, dxgi_shared_handle_manager_ = base::MakeRefCounted(std::move(d3d11_device)); } +#endif +#if BUILDFLAG(IS_OHOS) + shared_image_pool_ = SharedImagePool::Create(thread_safe); #endif CALLED_ON_VALID_THREAD(); @@ -109,6 +116,10 @@ SharedImageManager::~SharedImageManager() { #endif DCHECK(images_.empty()); +#if BUILDFLAG(IS_OHOS) + shared_image_pool_.reset(); +#endif + if (is_registered_as_memory_dump_provider_) { base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( this); @@ -415,8 +426,18 @@ void SharedImageManager::OnRepresentationDestroyed( // SharedImageManager::OnRepresentationDestroyed can be nested, so we need // to get the iterator again. auto found = images_.find(mailbox); + +#if BUILDFLAG(IS_OHOS) + if (found != images_.end() && (!(*found)->HasAnyRefs())) { + if (shared_image_pool_) { + shared_image_pool_->ReturnSharedImage(std::move(*found)); + } + images_.erase(found); + } +#else if (found != images_.end() && (!(*found)->HasAnyRefs())) images_.erase(found); +#endif } } diff --git a/gpu/command_buffer/service/shared_image/shared_image_manager.h b/gpu/command_buffer/service/shared_image/shared_image_manager.h index 54dc60e348..4fd9529e43 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_manager.h +++ b/gpu/command_buffer/service/shared_image/shared_image_manager.h @@ -24,6 +24,9 @@ namespace gpu { class DXGISharedHandleManager; class SharedImageRepresentationFactoryRef; class VaapiDependenciesFactory; +#if BUILDFLAG(IS_OHOS) +class SharedImagePool; +#endif class GPU_GLES2_EXPORT SharedImageManager : public base::trace_event::MemoryDumpProvider { @@ -135,6 +138,10 @@ class GPU_GLES2_EXPORT SharedImageManager } #endif +#if BUILDFLAG(IS_OHOS) + SharedImagePool* shared_image_pool() { return shared_image_pool_.get(); } +#endif + private: class AutoLock; // The lock for protecting |images_|. @@ -150,6 +157,10 @@ class GPU_GLES2_EXPORT SharedImageManager scoped_refptr dxgi_shared_handle_manager_; #endif +#if BUILDFLAG(IS_OHOS) + std::unique_ptr shared_image_pool_; +#endif + THREAD_CHECKER(thread_checker_); }; diff --git a/gpu/command_buffer/service/shared_image/shared_image_pool.cc b/gpu/command_buffer/service/shared_image/shared_image_pool.cc new file mode 100644 index 0000000000..3203bc390c --- /dev/null +++ b/gpu/command_buffer/service/shared_image/shared_image_pool.cc @@ -0,0 +1,327 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
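+
+// SharedImagePool keeps recently released SharedImageBackings in an MRU list
+// so that a compatible CreateSharedImage() request can recycle an existing
+// backing instead of allocating a new one. Entries are evicted by count, by
+// total byte size, by age and on critical memory pressure. The pool is only
+// instantiated when the kSharedImagePool feature and the
+// persist.web.shared_image_pool.enable system parameter are both enabled.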
+ +#include "gpu/command_buffer/service/shared_image/shared_image_pool.h" + +#include "base/feature_list.h" +#include "base/logging.h" +#include "base/memory/ptr_util.h" +#include "base/metrics/field_trial_params.h" +#include "base/task/single_thread_task_runner.h" +#include "base/trace_event/memory_dump_manager.h" +#include "gpu/command_buffer/common/shared_image_usage.h" +#include "gpu/command_buffer/service/shared_image/shared_image_backing_factory.h" +#include "third_party/ohos_ndk/includes/ohos_adapter/ohos_adapter_helper.h" + +namespace gpu { + +namespace { + +// Causes us to use the shared image backing pool. +BASE_FEATURE(kSharedImagePool, + "SharedImagePool", + base::FEATURE_DISABLED_BY_DEFAULT); + +// The max amount of images to keep in the pool. +constexpr base::FeatureParam kSharedImagePoolMaxCount{&kSharedImagePool, + "max-count", 50}; + +// The max size in MB allowed for the internal LRU pool of images. +constexpr base::FeatureParam kSharedImagePoolMemoryLimit{ + &kSharedImagePool, "memory-limit", 30}; + +// The max number of seconds since an LRU image has been used before we +// will try to evict it from the pool. +const base::FeatureParam kSharedImagePoolExpirationDelay{ + &kSharedImagePool, "expiration-delay", base::Seconds(30)}; + +const base::FeatureParam kSharedImagePoolSupportsCanvasResourceRaster{ + &kSharedImagePool, "supports-canvas-resource-raster", false}; + +const base::FeatureParam kSharedImagePoolSupportsWebGLDrawingBuffer{ + &kSharedImagePool, "supports-webgl-drawing-buffer", false}; + +constexpr uint32_t kGpuRasterTileUsage = SHARED_IMAGE_USAGE_DISPLAY_READ | + SHARED_IMAGE_USAGE_RASTER | + SHARED_IMAGE_USAGE_OOP_RASTERIZATION; + +constexpr uint32_t kCanvasResourceRasterUsage = + SHARED_IMAGE_USAGE_DISPLAY_READ | gpu::SHARED_IMAGE_USAGE_RASTER | + gpu::SHARED_IMAGE_USAGE_OOP_RASTERIZATION | gpu::SHARED_IMAGE_USAGE_GLES2 | + gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT; + +constexpr uint32_t kWebGLDrawingBufferUsage = + gpu::SHARED_IMAGE_USAGE_DISPLAY_READ | gpu::SHARED_IMAGE_USAGE_GLES2 | + gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT; + +constexpr uint32_t kWebGLDrawingBufferHighPerformanceUsage = + kWebGLDrawingBufferUsage | gpu::SHARED_IMAGE_USAGE_HIGH_PERFORMANCE_GPU; + +bool IsUsageSupported(uint32_t usage) { + static bool supports_canvas_resource_raster = + kSharedImagePoolSupportsCanvasResourceRaster.Get(); + static bool supports_webgl_drawing_buffer = + kSharedImagePoolSupportsWebGLDrawingBuffer.Get(); + + if (usage == kGpuRasterTileUsage) { + return true; + } else if (usage == kCanvasResourceRasterUsage) { + return supports_canvas_resource_raster; + } else if (usage == kWebGLDrawingBufferUsage || + usage == kWebGLDrawingBufferHighPerformanceUsage) { + return supports_webgl_drawing_buffer; + } + + return false; +} + +} // namespace + +class SCOPED_LOCKABLE SharedImagePool::AutoLock { + public: + explicit AutoLock(const SharedImagePool* pool) + EXCLUSIVE_LOCK_FUNCTION(pool->lock_) + : auto_lock_(pool->is_thread_safe() ? 
&pool->lock_.value() : nullptr) {} + + AutoLock(const AutoLock&) = delete; + AutoLock& operator=(const AutoLock&) = delete; + + ~AutoLock() UNLOCK_FUNCTION() = default; + + private: + base::AutoLockMaybe auto_lock_; +}; + +// static +std::unique_ptr SharedImagePool::Create(bool thread_safe) { + static bool persist_web_shared_image_pool_enabled = + OHOS::NWeb::OhosAdapterHelper::GetInstance() + .GetSystemPropertiesInstance() + .GetBoolParameter("persist.web.shared_image_pool.enable", true); + if (!persist_web_shared_image_pool_enabled) { + return nullptr; + } + + if (!base::FeatureList::IsEnabled(kSharedImagePool)) { + return nullptr; + } + + return base::WrapUnique(new SharedImagePool(thread_safe)); +} + +SharedImagePool::SharedImagePool(bool thread_safe) + : max_memory_usage_bytes_(kSharedImagePoolMemoryLimit.Get() * 1024 * 1024), + max_image_count_(kSharedImagePoolMaxCount.Get()), + expiration_delay_(kSharedImagePoolExpirationDelay.Get()), + task_runner_(base::SingleThreadTaskRunner::GetCurrentDefault()) { + DCHECK(task_runner_); + + if (thread_safe) { + lock_.emplace(); + } + + base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( + this, "gpu::SharedImagePool", task_runner_); + + memory_pressure_listener_ = std::make_unique( + FROM_HERE, base::BindRepeating(&SharedImagePool::OnMemoryPressure, + weak_ptr_factory_.GetWeakPtr())); +} + +SharedImagePool::~SharedImagePool() { + base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( + this); + + AutoLock autolock(this); + + EvictImagesNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); + DCHECK_EQ(0u, images_.size()); + DCHECK_EQ(0u, total_memory_usage_bytes_); + DCHECK_EQ(0u, total_image_count_); +} + +bool SharedImagePool::OnMemoryDump( + const base::trace_event::MemoryDumpArgs& args, + base::trace_event::ProcessMemoryDump* pmd) { + AutoLock autolock(this); + + size_t total_size = 0; + for (auto& image : images_) { + total_size = image.backing->GetEstimatedSizeForMemoryDump(); + } + + std::string dump_name = base::StringPrintf( + "gpu/shared_image_pool/0x%" PRIXPTR, reinterpret_cast(this)); + base::trace_event::MemoryAllocatorDump* dump = + pmd->CreateAllocatorDump(dump_name); + dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, + base::trace_event::MemoryAllocatorDump::kUnitsBytes, + total_size); + return true; +} + +void SharedImagePool::OnMemoryPressure( + base::MemoryPressureListener::MemoryPressureLevel level) { + AutoLock autolock(this); + + switch (level) { + case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE: + case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE: + break; + case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL: + // Evict all images, regardless of how recently they were used. + EvictImagesNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); + break; + } +} + +std::unique_ptr SharedImagePool::GetOrCreateSharedImage( + SharedImageBackingFactory* factory, + const Mailbox& mailbox, + viz::SharedImageFormat format, + SurfaceHandle surface_handle, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label, + bool is_thread_safe, + base::span pixel_data) { + AutoLock autolock(this); + + if (IsUsageSupported(usage)) { + // Finding image backing in |images_| from MRU to LRU direction. 
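+    // A matching entry is consumed (erased) as soon as its backing is handed
+    // to the factory: on success the pool's byte and image counters are
+    // reduced and the recycled backing is returned, otherwise the scan
+    // continues with the next entry.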
+ for (auto it = images_.begin(); it != images_.end();) { + DCHECK(mailbox != it->backing->mailbox()); + + if (factory->CanCreateFromSharedImage(it->backing, usage, format, size, + is_thread_safe, pixel_data)) { + auto backing = factory->CreateFromSharedImage( + mailbox, std::move(it->backing), color_space, surface_origin, + alpha_type); + it = images_.erase(it); + + if (backing) { + total_memory_usage_bytes_ -= backing->GetEstimatedSize(); + --total_image_count_; + return backing; + } else { + DLOG(ERROR) << "Failed to recycle shared image"; + continue; + } + } + + ++it; + } + } + + if (!pixel_data.empty()) { + return factory->CreateSharedImage(mailbox, format, size, color_space, + surface_origin, alpha_type, usage, + std::move(debug_label), pixel_data); + } + + return factory->CreateSharedImage( + mailbox, format, surface_handle, size, color_space, surface_origin, + alpha_type, usage, std::move(debug_label), is_thread_safe); +} + +void SharedImagePool::ReturnSharedImage( + std::unique_ptr backing) { + DCHECK(backing); + + // Whether is returned shared image backing can be reused. + if (!backing->IsReusable() || !IsUsageSupported(backing->usage())) { + return; + } + + AutoLock autolock(this); + + total_memory_usage_bytes_ += backing->GetEstimatedSize(); + ++total_image_count_; + images_.emplace_front(std::move(backing)); + + // Reduce memory of unused images to stay within the limit. + ReduceMemoryUsage(); + // Now that we have evictable images, schedule an eviction call. + ScheduleEvictExpiredImages(); +} + +void SharedImagePool::ReduceMemoryUsage() { + while (!images_.empty()) { + if (total_image_count_ <= max_image_count_ && + total_memory_usage_bytes_ <= max_memory_usage_bytes_) { + break; + } + + DCHECK_GE(total_memory_usage_bytes_, + images_.back().backing->GetEstimatedSize()); + total_memory_usage_bytes_ -= images_.back().backing->GetEstimatedSize(); + --total_image_count_; + + images_.pop_back(); + } +} + +void SharedImagePool::ScheduleEvictExpiredImages() { + if (evict_expired_images_pending_ || images_.empty()) { + return; + } + + evict_expired_images_pending_ = true; + + // Schedule a call to EvictExpiredImages at the time when the LRU image + // should be released. + base::TimeTicks expiration_time = + images_.back().timestamp + expiration_delay_; + task_runner_->PostDelayedTask( + FROM_HERE, + base::BindOnce(&SharedImagePool::EvictExpiredImages, + weak_ptr_factory_.GetWeakPtr()), + expiration_time - base::TimeTicks::Now()); +} + +void SharedImagePool::EvictExpiredImages() { + AutoLock autolock(this); + + evict_expired_images_pending_ = false; + + if (images_.empty()) { + return; + } + + base::TimeTicks current_time = base::TimeTicks::Now(); + EvictImagesNotUsedSince(current_time - expiration_delay_); + ScheduleEvictExpiredImages(); +} + +void SharedImagePool::EvictImagesNotUsedSince(base::TimeTicks time) { + while (!images_.empty()) { + // Note: Back image is guaranteed to be LRU so we can stop releasing + // images as soon as we find a image that has been used since |time|. 
+ if (images_.back().timestamp > time) { + return; + } + + DCHECK_GE(total_memory_usage_bytes_, + images_.back().backing->GetEstimatedSize()); + total_memory_usage_bytes_ -= images_.back().backing->GetEstimatedSize(); + --total_image_count_; + + images_.pop_back(); + } +} + +SharedImagePool::CacheEntry::CacheEntry( + std::unique_ptr backing) + : backing(std::move(backing)), timestamp(base::TimeTicks::Now()) {} + +SharedImagePool::CacheEntry::CacheEntry(CacheEntry&& other) = default; +SharedImagePool::CacheEntry& SharedImagePool::CacheEntry::operator=( + CacheEntry&& other) = default; + +SharedImagePool::CacheEntry::~CacheEntry() = default; + +} // namespace gpu diff --git a/gpu/command_buffer/service/shared_image/shared_image_pool.h b/gpu/command_buffer/service/shared_image/shared_image_pool.h new file mode 100644 index 0000000000..a24c4a316c --- /dev/null +++ b/gpu/command_buffer/service/shared_image/shared_image_pool.h @@ -0,0 +1,104 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_SHARED_IMAGE_POOL_H_ +#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_SHARED_IMAGE_POOL_H_ + +#include "base/containers/circular_deque.h" +#include "base/memory/memory_pressure_listener.h" +#include "base/memory/weak_ptr.h" +#include "base/synchronization/lock.h" +#include "base/task/single_thread_task_runner.h" +#include "base/trace_event/memory_dump_provider.h" +#include "gpu/command_buffer/service/shared_image/shared_image_backing.h" +#include "gpu/gpu_gles2_export.h" +#include "gpu/ipc/common/surface_handle.h" +#include "third_party/abseil-cpp/absl/types/optional.h" + +namespace gpu { + +class SharedImageBackingFactory; + +class GPU_GLES2_EXPORT SharedImagePool + : public base::trace_event::MemoryDumpProvider { + public: + static std::unique_ptr Create(bool thread_safe); + + SharedImagePool(const SharedImagePool&) = delete; + SharedImagePool& operator=(const SharedImagePool&) = delete; + + ~SharedImagePool(); + + // base::trace_event::MemoryDumpProvider implementation. + bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args, + base::trace_event::ProcessMemoryDump* pmd) override; + + void OnMemoryPressure( + base::MemoryPressureListener::MemoryPressureLevel level); + + // Gets a shared image from the pool. If there is no shared image available + // in the pool, a new shared image will be created. + std::unique_ptr GetOrCreateSharedImage( + SharedImageBackingFactory* factory, + const Mailbox& mailbox, + viz::SharedImageFormat format, + SurfaceHandle surface_handle, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label, + bool is_thread_safe, + base::span pixel_data); + + // Returns a shared image back to the pool. It can be reused or released + // immediately. 
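+  // SharedImageManager::OnRepresentationDestroyed() hands backings to this
+  // method once their last reference goes away, instead of destroying them
+  // directly.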
+ void ReturnSharedImage(std::unique_ptr backing); + + bool is_thread_safe() const { return !!lock_; } + + private: + explicit SharedImagePool(bool thread_safe); + + void ReduceMemoryUsage() EXCLUSIVE_LOCKS_REQUIRED(lock_); + void ScheduleEvictExpiredImages() EXCLUSIVE_LOCKS_REQUIRED(lock_); + void EvictExpiredImages(); + void EvictImagesNotUsedSince(base::TimeTicks time) EXCLUSIVE_LOCKS_REQUIRED(lock_); + + const size_t max_memory_usage_bytes_; + const size_t max_image_count_; + const base::TimeDelta expiration_delay_; + + scoped_refptr task_runner_; + + class AutoLock; + // |lock_| must be acquired when accessing the following members. + mutable absl::optional lock_; + + size_t total_memory_usage_bytes_ GUARDED_BY(lock_) = 0; + size_t total_image_count_ GUARDED_BY(lock_) = 0; + bool evict_expired_images_pending_ GUARDED_BY(lock_) = false; + + struct CacheEntry { + CacheEntry(std::unique_ptr backing); + CacheEntry(CacheEntry&& other); + CacheEntry& operator=(CacheEntry&& other); + ~CacheEntry(); + + std::unique_ptr backing; + base::TimeTicks timestamp; + }; + + // Holds most recently used images at the front of the queue. + base::circular_deque images_ GUARDED_BY(lock_); + + std::unique_ptr memory_pressure_listener_; + + base::WeakPtrFactory weak_ptr_factory_{this}; +}; + +} // namespace gpu + +#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_SHARED_IMAGE_POOL_H_ diff --git a/ohos_nweb/src/nweb_impl.cc b/ohos_nweb/src/nweb_impl.cc index e92ab6a748..2c77999c7c 100644 --- a/ohos_nweb/src/nweb_impl.cc +++ b/ohos_nweb/src/nweb_impl.cc @@ -324,7 +324,6 @@ void InitialWebEngineArgs( web_engine_args.emplace_back("--off-screen-frame-rate=60"); web_engine_args.emplace_back("--no-unsandboxed-zygote"); web_engine_args.emplace_back("--no-zygote"); - web_engine_args.emplace_back("--enable-features=UseOzonePlatform"); web_engine_args.emplace_back("-ozone-platform=headless"); web_engine_args.emplace_back("--no-sandbox"); web_engine_args.emplace_back("--use-mobile-user-agent"); @@ -365,6 +364,29 @@ void InitialWebEngineArgs( web_engine_args.emplace_back("--ohos-enhance-surface"); } + std::vector enable_features; + enable_features.emplace_back("UseOzonePlatform"); + +#if defined(OHOS_NWEB_EX) + if (base::ohos::IsTabletDevice() || base::ohos::IsPcDevice()) { + if (g_browser_service_api_enabled) { + enable_features.emplace_back("SharedImagePool"); + } + } +#endif + + if (!enable_features.empty()) { + std::string aggregated_features; + for (const auto& feature : enable_features) { + if (!aggregated_features.empty()) { + aggregated_features.append(","); + } + aggregated_features.append(feature); + } + + web_engine_args.emplace_back("--enable-features=" + aggregated_features); + } + auto args_to_delete = GetArgsToDelete(init_args); bool xml_gpu = false; for (auto arg : args_to_delete) { -- Gitee From b478b093aa0fe904c9b8c9f6196a86b71ba160e4 Mon Sep 17 00:00:00 2001 From: v00863305 Date: Fri, 17 May 2024 14:14:55 +0300 Subject: [PATCH 5/5] Native buffer queue surface presenter Allows to use direct output presentation to native buffer queue surface (native window -> producer surface -> buffer queue -> consumer surface -> RenderSurfaceRenderNode) without any interaction with EGL platform adaptation layer (from GPU module). 
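In rough terms the presenter drives the buffer queue directly, along the
lines of the sketch below (illustrative only: the names used here are
hypothetical placeholders, not the real NDK or patch symbols):

  // Hypothetical names, for illustration only.
  NativeBuffer* buffer = queue->RequestBuffer();      // explicit allocation
  RenderFrame(buffer, damage_rect);                   // partial update
  ScopedEglFence fence = CreateEglFenceSync();        // explicit sync
  queue->FlushBuffer(buffer, fence.Release(), damage_rect);

The two paths compare as follows.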
EGL adaptation layer: - implicit presentation (eglSwapBuffers) - implicit synchronization (hidden from us) - implicit DMA buffers allocation (by RS) - full frame update - no access to native window buffers - implicit buffer management in background (by RS) Buffer queue surface presenter: - explicit presentation (via NativeWindow API) - explicit synchronization (egl fenc sync + gpu fence) - explicit DMA buffers allocation (by us) - partial update - has access to native window buffers - explicit buffer management in background (by client) On chromium level (enable by default): --enable-feature=BufferQueueSurfacePresenter On system level (disable by default): persist.web.buffer_queue_surface_presenter.enable false Signed-off-by: Volykhin Andrei Change-Id: Iaaadef3cb4c58e516688395495f509c80ac2176d --- .../viz/service/display/direct_renderer.h | 3 + components/viz/service/display/display.cc | 12 + .../viz/service/display/output_surface.h | 3 + .../display/overlay_processor_interface.cc | 2 + .../viz/service/display/skia_renderer.cc | 3 + .../display_embedder/output_presenter.cc | 6 + .../display_embedder/output_presenter.h | 6 + .../display_embedder/output_presenter_gl.cc | 27 +- .../display_embedder/output_presenter_gl.h | 5 + .../skia_output_device_buffer_queue.cc | 51 ++ .../skia_output_device_buffer_queue.h | 5 + .../skia_output_surface_impl.cc | 18 + gpu/command_buffer/service/BUILD.gn | 7 + .../service/oh_native_buffer_utils.cc | 32 + .../service/oh_native_buffer_utils.h | 23 + .../oh_native_buffer_image_backing.cc | 463 ++++++++++++++ .../oh_native_buffer_image_backing.h | 108 ++++ .../oh_native_buffer_image_backing_factory.cc | 236 +++++++ .../oh_native_buffer_image_backing_factory.h | 115 ++++ .../shared_image/shared_image_backing.cc | 4 + .../shared_image/shared_image_backing.h | 5 + .../shared_image/shared_image_factory.cc | 8 + .../shared_image/shared_image_manager.cc | 3 + .../shared_image_representation.cc | 6 + .../shared_image_representation.h | 10 + .../service/image_transport_surface_ohos.cc | 70 ++- ui/gfx/BUILD.gn | 38 ++ ui/gfx/frame_data.h | 3 + ui/gfx/gpu_fence.cc | 12 +- ui/gfx/ohos/native_buffer.cc | 74 +++ ui/gfx/ohos/native_buffer.h | 71 +++ ui/gfx/ohos/native_buffer_manager.cc | 216 +++++++ ui/gfx/ohos/native_buffer_manager.h | 69 ++ ui/gfx/ohos/native_buffer_queue.cc | 595 ++++++++++++++++++ ui/gfx/ohos/native_buffer_queue.h | 113 ++++ ui/gfx/ohos/native_buffer_queue_cache.cc | 274 ++++++++ ui/gfx/ohos/native_buffer_queue_cache.h | 127 ++++ ui/gfx/ohos/native_buffer_queue_surface.h | 26 + ui/gfx/ohos/native_buffer_utils.cc | 32 + ui/gfx/ohos/native_buffer_utils.h | 25 + ui/gfx/ohos/oh_native_buffer_abi.h | 150 +++++ ui/gfx/ohos/oh_native_buffer_compat.cc | 100 +++ ui/gfx/ohos/oh_native_buffer_compat.h | 60 ++ ui/gfx/ohos/oh_native_surface_factory.cc | 76 +++ ui/gfx/ohos/oh_native_surface_factory.h | 73 +++ ui/gfx/ohos/oh_native_window_abi.h | 97 +++ ui/gfx/ohos/oh_native_window_compat.cc | 189 ++++++ ui/gfx/ohos/oh_native_window_compat.h | 80 +++ ui/gfx/ohos/scoped_native_buffer_handle.cc | 122 ++++ ui/gfx/ohos/scoped_native_buffer_handle.h | 88 +++ ui/gfx/ohos/scoped_oh_native_window.cc | 49 ++ ui/gfx/ohos/scoped_oh_native_window.h | 49 ++ ui/gl/BUILD.gn | 2 + .../gl_surface_egl_buffer_queue_presenter.cc | 293 +++++++++ ui/gl/gl_surface_egl_buffer_queue_presenter.h | 140 +++++ ui/gl/presenter.h | 15 +- 56 files changed, 4478 insertions(+), 11 deletions(-) create mode 100644 gpu/command_buffer/service/oh_native_buffer_utils.cc create mode 100644 
gpu/command_buffer/service/oh_native_buffer_utils.h create mode 100644 gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.cc create mode 100644 gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.h create mode 100644 gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.cc create mode 100644 gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.h create mode 100644 ui/gfx/ohos/native_buffer.cc create mode 100644 ui/gfx/ohos/native_buffer.h create mode 100644 ui/gfx/ohos/native_buffer_manager.cc create mode 100644 ui/gfx/ohos/native_buffer_manager.h create mode 100644 ui/gfx/ohos/native_buffer_queue.cc create mode 100644 ui/gfx/ohos/native_buffer_queue.h create mode 100644 ui/gfx/ohos/native_buffer_queue_cache.cc create mode 100644 ui/gfx/ohos/native_buffer_queue_cache.h create mode 100644 ui/gfx/ohos/native_buffer_queue_surface.h create mode 100644 ui/gfx/ohos/native_buffer_utils.cc create mode 100644 ui/gfx/ohos/native_buffer_utils.h create mode 100755 ui/gfx/ohos/oh_native_buffer_abi.h create mode 100644 ui/gfx/ohos/oh_native_buffer_compat.cc create mode 100644 ui/gfx/ohos/oh_native_buffer_compat.h create mode 100644 ui/gfx/ohos/oh_native_surface_factory.cc create mode 100644 ui/gfx/ohos/oh_native_surface_factory.h create mode 100755 ui/gfx/ohos/oh_native_window_abi.h create mode 100644 ui/gfx/ohos/oh_native_window_compat.cc create mode 100644 ui/gfx/ohos/oh_native_window_compat.h create mode 100644 ui/gfx/ohos/scoped_native_buffer_handle.cc create mode 100644 ui/gfx/ohos/scoped_native_buffer_handle.h create mode 100644 ui/gfx/ohos/scoped_oh_native_window.cc create mode 100644 ui/gfx/ohos/scoped_oh_native_window.h create mode 100644 ui/gl/gl_surface_egl_buffer_queue_presenter.cc create mode 100644 ui/gl/gl_surface_egl_buffer_queue_presenter.h diff --git a/components/viz/service/display/direct_renderer.h b/components/viz/service/display/direct_renderer.h index 5305ad0dfd..7caa013d09 100644 --- a/components/viz/service/display/direct_renderer.h +++ b/components/viz/service/display/direct_renderer.h @@ -112,6 +112,9 @@ class VIZ_SERVICE_EXPORT DirectRenderer { gfx::CALayerResult ca_layer_error_code = gfx::kCALayerSuccess; #endif absl::optional choreographer_vsync_id; +#if BUILDFLAG(IS_OHOS) + bool local_frame = false; +#endif }; virtual void SwapBuffers(SwapFrameData swap_frame_data) = 0; virtual void SwapBuffersSkipped() {} diff --git a/components/viz/service/display/display.cc b/components/viz/service/display/display.cc index eacf72a518..b8adffe2a7 100644 --- a/components/viz/service/display/display.cc +++ b/components/viz/service/display/display.cc @@ -801,6 +801,9 @@ bool Display::DrawAndSwap(const DrawAndSwapParams& params) { } gfx::OverlayTransform current_display_transform = gfx::OVERLAY_TRANSFORM_NONE; +#if BUILDFLAG(IS_OHOS) + uint32_t current_frame_token = kInvalidOrLocalFrameToken; +#endif Surface* surface = surface_manager_->GetSurfaceForId(current_surface_id_); if (surface->HasActiveFrame()) { current_display_transform = @@ -812,6 +815,10 @@ bool Display::DrawAndSwap(const DrawAndSwapParams& params) { // the hint, the rest of the code ignores the hint too. 
current_display_transform = output_surface_->GetDisplayTransform(); } + +#if BUILDFLAG(IS_OHOS) + current_frame_token = surface->GetActiveFrameMetadata().frame_token; +#endif } base::ScopedClosureRunner visual_debugger_sync_scoped_exit( @@ -1077,6 +1084,11 @@ bool Display::DrawAndSwap(const DrawAndSwapParams& params) { overlay_processor_->GetCALayerErrorCode(); #endif +#if BUILDFLAG(IS_OHOS) + swap_frame_data.local_frame = + current_frame_token == kInvalidOrLocalFrameToken; +#endif + // We must notify scheduler and increase |pending_swaps_| before calling // SwapBuffers() as it can call DidReceiveSwapBuffersAck synchronously. if (scheduler_) diff --git a/components/viz/service/display/output_surface.h b/components/viz/service/display/output_surface.h index 3b556dd619..a19d6d7955 100644 --- a/components/viz/service/display/output_surface.h +++ b/components/viz/service/display/output_surface.h @@ -139,6 +139,9 @@ class VIZ_SERVICE_EXPORT OutputSurface { // Wayland backend is able to delegate these overlays without buffer // backings depending on the availability of a certain protocol. bool supports_non_backed_solid_color_overlays = false; + // Whether the platform supports visiblity control (including explicit + // buffer management). + bool supports_visibility_control = false; // SkColorType for all supported buffer formats. SkColorType sk_color_types[static_cast(gfx::BufferFormat::LAST) + 1] = diff --git a/components/viz/service/display/overlay_processor_interface.cc b/components/viz/service/display/overlay_processor_interface.cc index fea01b37ad..73d1be764b 100644 --- a/components/viz/service/display/overlay_processor_interface.cc +++ b/components/viz/service/display/overlay_processor_interface.cc @@ -116,6 +116,8 @@ OverlayProcessorInterface::CreateOverlayProcessor( .supports_two_yuv_hardware_overlays ? 2 : 1)); +#elif BUILDFLAG(IS_OHOS) + return std::make_unique(); #elif BUILDFLAG(IS_OZONE) #if !BUILDFLAG(IS_CASTOS) // In tests and Ozone/X11, we do not expect surfaceless surface support. 
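Condensed for readability (not a literal excerpt), the IS_OHOS hunks in
display.cc above and skia_renderer.cc below plumb the flag like this:

  // Display::DrawAndSwap()
  swap_frame_data.local_frame =
      current_frame_token == kInvalidOrLocalFrameToken;
  // SkiaRenderer::SwapBuffers()
  output_frame.data.local_frame = swap_frame_data.local_frame;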
diff --git a/components/viz/service/display/skia_renderer.cc b/components/viz/service/display/skia_renderer.cc index b8293061be..a74d7c4ade 100644 --- a/components/viz/service/display/skia_renderer.cc +++ b/components/viz/service/display/skia_renderer.cc @@ -949,6 +949,9 @@ void SkiaRenderer::SwapBuffers(SwapFrameData swap_frame_data) { output_frame.choreographer_vsync_id = swap_frame_data.choreographer_vsync_id; output_frame.size = viewport_size_for_swap_buffers(); output_frame.data.seq = swap_frame_data.seq; +#if BUILDFLAG(IS_OHOS) + output_frame.data.local_frame = swap_frame_data.local_frame; +#endif #if BUILDFLAG(IS_OHOS) swap_buffer_rect_.Intersect(gfx::Rect(surface_size_for_swap_buffers())); output_frame.sub_buffer_rect = swap_buffer_rect_; diff --git a/components/viz/service/display_embedder/output_presenter.cc b/components/viz/service/display_embedder/output_presenter.cc index 66f45a9166..3381bfa26e 100644 --- a/components/viz/service/display_embedder/output_presenter.cc +++ b/components/viz/service/display_embedder/output_presenter.cc @@ -163,4 +163,10 @@ bool OutputPresenter::SupportsGpuVSync() const { return false; } +#if BUILDFLAG(IS_OHOS) +bool OutputPresenter::SupportsOverrideBufferCount() const { + return false; +} +#endif + } // namespace viz diff --git a/components/viz/service/display_embedder/output_presenter.h b/components/viz/service/display_embedder/output_presenter.h index 9836e236f7..13992a3c99 100644 --- a/components/viz/service/display_embedder/output_presenter.h +++ b/components/viz/service/display_embedder/output_presenter.h @@ -138,6 +138,12 @@ class VIZ_SERVICE_EXPORT OutputPresenter { #if BUILDFLAG(IS_APPLE) virtual void SetCALayerErrorCode(gfx::CALayerResult ca_layer_error_code) {} #endif + +#if BUILDFLAG(IS_OHOS) + virtual bool SupportsOverrideBufferCount() const; + virtual int GetBufferCount() const = 0; + virtual void SetVisibility(bool visibility) {} +#endif }; } // namespace viz diff --git a/components/viz/service/display_embedder/output_presenter_gl.cc b/components/viz/service/display_embedder/output_presenter_gl.cc index 60323ece75..07f37e3085 100644 --- a/components/viz/service/display_embedder/output_presenter_gl.cc +++ b/components/viz/service/display_embedder/output_presenter_gl.cc @@ -102,7 +102,9 @@ gl::OverlayImage PresenterImageGL::GetOverlayImage( if (fence) { *fence = TakeGpuFence(scoped_overlay_read_access_->TakeAcquireFence()); } -#if BUILDFLAG(IS_OZONE) +#if BUILDFLAG(IS_OHOS) + return scoped_overlay_read_access_->GetNativeBuffer(); +#elif BUILDFLAG(IS_OZONE) return scoped_overlay_read_access_->GetNativePixmap(); #elif BUILDFLAG(IS_APPLE) return scoped_overlay_read_access_->GetIOSurface(); @@ -151,6 +153,10 @@ void OutputPresenterGL::InitializeCapabilities( #if BUILDFLAG(IS_ANDROID) capabilities->supports_dynamic_frame_buffer_allocation = true; #endif +#if BUILDFLAG(IS_OHOS) + capabilities->supports_visibility_control = + presenter_->SupportsVisibilityControl(); +#endif // TODO(https://crbug.com/1108406): only add supported formats base on // platform, driver, etc. @@ -266,8 +272,10 @@ void OutputPresenterGL::ScheduleOverlayPlane( // Note that |overlay_plane_candidate| has different types on different // platforms. On Android, Ozone, and Windows, it is an OverlayCandidate and on // macOS it is a CALayeroverlay. -#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_OZONE) -#if BUILDFLAG(IS_OZONE) +#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_OHOS) || BUILDFLAG(IS_OZONE) +#if BUILDFLAG(IS_OHOS) + gl::OverlayImage overlay_image = access ? 
access->GetNativeBuffer() : nullptr; +#elif BUILDFLAG(IS_OZONE) // TODO(crbug.com/1366808): Add ScopedOverlayAccess::GetOverlayImage() that // works on all platforms. gl::OverlayImage overlay_image = access ? access->GetNativePixmap() : nullptr; @@ -361,7 +369,20 @@ void OutputPresenterGL::SetCALayerErrorCode( gfx::CALayerResult ca_layer_error_code) { ca_layer_error_code_ = ca_layer_error_code; } +#endif +#if BUILDFLAG(IS_OHOS) +bool OutputPresenterGL::SupportsOverrideBufferCount() const { + return presenter_->SupportsOverrideBufferCount(); +} + +int OutputPresenterGL::GetBufferCount() const { + return presenter_->GetBufferCount(); +} + +void OutputPresenterGL::SetVisibility(bool visibility) { + presenter_->SetVisibility(visibility); +} #endif } // namespace viz diff --git a/components/viz/service/display_embedder/output_presenter_gl.h b/components/viz/service/display_embedder/output_presenter_gl.h index 1e9144a1b2..42664aa32d 100644 --- a/components/viz/service/display_embedder/output_presenter_gl.h +++ b/components/viz/service/display_embedder/output_presenter_gl.h @@ -64,6 +64,11 @@ class VIZ_SERVICE_EXPORT OutputPresenterGL : public OutputPresenter { #if BUILDFLAG(IS_APPLE) void SetCALayerErrorCode(gfx::CALayerResult ca_layer_error_code) final; #endif +#if BUILDFLAG(IS_OHOS) + bool SupportsOverrideBufferCount() const final; + int GetBufferCount() const final; + void SetVisibility(bool visibility) final; +#endif private: scoped_refptr presenter_; diff --git a/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc b/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc index 74ef82337d..f904330b46 100644 --- a/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc +++ b/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc @@ -180,6 +180,11 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue( if (::features::IncreaseBufferCountForHighFrameRate()) { capabilities_.number_of_buffers = 5; } +#endif +#if BUILDFLAG(IS_OHOS) + if (presenter_->SupportsOverrideBufferCount()) { + capabilities_.number_of_buffers = presenter_->GetBufferCount(); + } #endif capabilities_.orientation_mode = OutputSurface::OrientationMode::kHardware; @@ -199,6 +204,21 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue( capabilities_.pending_swap_params.max_pending_swaps_90hz = 3; capabilities_.pending_swap_params.max_pending_swaps_120hz = 4; } +#endif +#if BUILDFLAG(IS_OHOS) + if (presenter_->SupportsOverrideBufferCount() && + capabilities_.number_of_buffers >= 4) { + // Force the number of max pending frames to number_of_buffers - 2 + // to prevent frequent frame dropping. + // See RSBaseRenderUtil::DropFrameProcess() for more information. 
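+    // For example, number_of_buffers == 5 yields max_pending_swaps = 2,
+    // max_pending_swaps_90hz = 3 and max_pending_swaps_120hz = 4; with 6
+    // buffers the 90 Hz and 120 Hz values both become 4.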
+ capabilities_.pending_swap_params.max_pending_swaps = 2; + if (capabilities_.number_of_buffers >= 5) { + capabilities_.pending_swap_params.max_pending_swaps_90hz = + std::max(capabilities_.number_of_buffers - 2, 3); + capabilities_.pending_swap_params.max_pending_swaps_120hz = + std::max(capabilities_.number_of_buffers - 2, 4); + } + } #endif DCHECK_LT(capabilities_.pending_swap_params.max_pending_swaps, capabilities_.number_of_buffers); @@ -236,7 +256,13 @@ SkiaOutputDeviceBufferQueue::~SkiaOutputDeviceBufferQueue() { OutputPresenter::Image* SkiaOutputDeviceBufferQueue::GetNextImage() { DCHECK(!capabilities_.renderer_allocates_images); +#if BUILDFLAG(IS_OHOS) + if (available_images_.empty()) { + return nullptr; + } +#else CHECK(!available_images_.empty()); +#endif auto* image = available_images_.front(); available_images_.pop_front(); return image; @@ -665,6 +691,11 @@ SkSurface* SkiaOutputDeviceBufferQueue::BeginPaint( if (!current_image_) { current_image_ = GetNextImage(); +#if BUILDFLAG(IS_OHOS) + if (!current_image_) { + return nullptr; + } +#endif } if (!current_image_->sk_surface()) @@ -718,4 +749,24 @@ void SkiaOutputDeviceBufferQueue::SetVSyncDisplayID(int64_t display_id) { presenter_->SetVSyncDisplayID(display_id); } +#if BUILDFLAG(IS_OHOS) +void SkiaOutputDeviceBufferQueue::EnsureBackbuffer() { + if (capabilities_.supports_visibility_control) { + // Ignore EnsureBackbuffer if Reshape has not been called yet. + if (image_size_.IsEmpty()) + return; + + presenter_->SetVisibility(true); + RecreateImages(); + } +} + +void SkiaOutputDeviceBufferQueue::DiscardBackbuffer() { + if (capabilities_.supports_visibility_control) { + presenter_->SetVisibility(false); + FreeAllSurfaces(); + } +} +#endif + } // namespace viz diff --git a/components/viz/service/display_embedder/skia_output_device_buffer_queue.h b/components/viz/service/display_embedder/skia_output_device_buffer_queue.h index 1bc2deea90..469aa69232 100644 --- a/components/viz/service/display_embedder/skia_output_device_buffer_queue.h +++ b/components/viz/service/display_embedder/skia_output_device_buffer_queue.h @@ -67,6 +67,11 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue : public SkiaOutputDevice { void SetGpuVSyncEnabled(bool enabled) override; void SetVSyncDisplayID(int64_t display_id) override; +#if BUILDFLAG(IS_OHOS) + void EnsureBackbuffer() override; + void DiscardBackbuffer() override; +#endif + private: friend class SkiaOutputDeviceBufferQueueTest; diff --git a/components/viz/service/display_embedder/skia_output_surface_impl.cc b/components/viz/service/display_embedder/skia_output_surface_impl.cc index ba59918ef7..82d1b2de8d 100644 --- a/components/viz/service/display_embedder/skia_output_surface_impl.cc +++ b/components/viz/service/display_embedder/skia_output_surface_impl.cc @@ -296,6 +296,15 @@ void SkiaOutputSurfaceImpl::SetEnableDCLayers(bool enable) { void SkiaOutputSurfaceImpl::EnsureBackbuffer() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + +#if BUILDFLAG(IS_OHOS) + if (use_damage_area_from_skia_output_device_) { + damage_of_current_buffer_ = gfx::Rect(size_); + } else if (frame_buffer_damage_tracker_) { + frame_buffer_damage_tracker_->FrameBuffersChanged(size_); + } +#endif + // impl_on_gpu_ is released on the GPU thread by a posted task from // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained. 
auto callback = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::EnsureBackbuffer, @@ -305,6 +314,15 @@ void SkiaOutputSurfaceImpl::EnsureBackbuffer() { void SkiaOutputSurfaceImpl::DiscardBackbuffer() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + +#if BUILDFLAG(IS_OHOS) + if (use_damage_area_from_skia_output_device_) { + damage_of_current_buffer_ = gfx::Rect(size_); + } else if (frame_buffer_damage_tracker_) { + frame_buffer_damage_tracker_->FrameBuffersChanged(size_); + } +#endif + // impl_on_gpu_ is released on the GPU thread by a posted task from // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained. auto callback = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::DiscardBackbuffer, diff --git a/gpu/command_buffer/service/BUILD.gn b/gpu/command_buffer/service/BUILD.gn index ee2e46ecc3..72ca60976c 100644 --- a/gpu/command_buffer/service/BUILD.gn +++ b/gpu/command_buffer/service/BUILD.gn @@ -602,7 +602,14 @@ target(link_target_type, "gles2_sources") { configs -= [ "//build/config/compiler:default_optimization" ] configs += [ "//build/config/compiler:optimize_max" ] } + sources += [ + "oh_native_buffer_utils.cc", + "oh_native_buffer_utils.h", + "shared_image/oh_native_buffer_image_backing.cc", + "shared_image/oh_native_buffer_image_backing.h", + "shared_image/oh_native_buffer_image_backing_factory.cc", + "shared_image/oh_native_buffer_image_backing_factory.h", "shared_image/shared_image_pool.cc", "shared_image/shared_image_pool.h", ] diff --git a/gpu/command_buffer/service/oh_native_buffer_utils.cc b/gpu/command_buffer/service/oh_native_buffer_utils.cc new file mode 100644 index 0000000000..cce4e3645c --- /dev/null +++ b/gpu/command_buffer/service/oh_native_buffer_utils.cc @@ -0,0 +1,32 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "gpu/command_buffer/service/oh_native_buffer_utils.h" + +#include "base/check.h" +#include "ui/gfx/ohos/scoped_native_buffer_handle.h" +#include "ui/gl/gl_gl_api_implementation.h" +#include "ui/gl/scoped_binders.h" + +#ifndef EGL_OHOS_image_native_buffer +#define EGL_OHOS_image_native_buffer 1 +#define EGL_NATIVE_BUFFER_OHOS 0x34E1 +#endif /* EGL_OHOS_image_native_buffer */ + +namespace gpu { + +gl::ScopedEGLImage CreateEGLImageFromNativeBufferHandle( + const gfx::ScopedNativeBufferHandle& handle) { + // According to "EGL_OHOS_image_native_buffer" extension the client buffer + // must be a pointer to a valid NativeWindowBuffer object. + DCHECK(handle.native_window_buffer()); + + EGLint egl_image_attribs[] = {EGL_IMAGE_PRESERVED_KHR, EGL_FALSE, EGL_NONE}; + EGLClientBuffer client_buffer = + static_cast(handle.native_window_buffer()); + return gl::MakeScopedEGLImage(EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_OHOS, + client_buffer, egl_image_attribs); +} + +} // namespace gpu diff --git a/gpu/command_buffer/service/oh_native_buffer_utils.h b/gpu/command_buffer/service/oh_native_buffer_utils.h new file mode 100644 index 0000000000..32ee42d8c7 --- /dev/null +++ b/gpu/command_buffer/service/oh_native_buffer_utils.h @@ -0,0 +1,23 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
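+
+// Typical use (mirroring oh_native_buffer_image_backing.cc later in this
+// patch): clone the buffer handle as a client buffer, create an EGLImage from
+// it with CreateEGLImageFromNativeBufferHandle() and bind that image to a GL
+// texture target via glEGLImageTargetTexture2DOES().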
+ +#ifndef GPU_COMMAND_BUFFER_SERVICE_OH_NATIVE_BUFFER_UTILS_H_ +#define GPU_COMMAND_BUFFER_SERVICE_OH_NATIVE_BUFFER_UTILS_H_ + +#include "gpu/gpu_gles2_export.h" +#include "ui/gl/scoped_egl_image.h" + +namespace gfx { +class ScopedNativeBufferHandle; +} // namespace gfx + +namespace gpu { + +// Creates an EGLImage from |handle|, setting EGL_IMAGE_PRESERVED_KHR to false. +GPU_GLES2_EXPORT gl::ScopedEGLImage CreateEGLImageFromNativeBufferHandle( + const gfx::ScopedNativeBufferHandle& handle); + +} // namespace gpu + +#endif // GPU_COMMAND_BUFFER_SERVICE_OH_NATIVE_BUFFER_UTILS_H_ diff --git a/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.cc b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.cc new file mode 100644 index 0000000000..5af8300dfb --- /dev/null +++ b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.cc @@ -0,0 +1,463 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.h" + +#include "base/logging.h" +#include "gpu/command_buffer/service/oh_native_buffer_utils.h" +#include "gpu/command_buffer/service/shared_context_state.h" +#include "gpu/command_buffer/service/shared_image/shared_image_format_utils.h" +#include "gpu/command_buffer/service/shared_image/shared_image_representation.h" +#include "gpu/command_buffer/service/shared_image/skia_gl_image_representation.h" +#include "gpu/command_buffer/service/texture_manager.h" +#include "ui/gfx/ohos/native_buffer.h" +#include "ui/gl/gl_context.h" +#include "ui/gl/gl_fence_egl.h" +#include "ui/gl/gl_gl_api_implementation.h" +#include "ui/gl/scoped_binders.h" +#include "ui/gl/scoped_egl_image.h" +#include "ui/gl/shared_gl_fence_egl.h" + +namespace gpu { + +namespace { + +GLuint CreateAndBindTexture(EGLImage image, GLenum target) { + gl::GLApi* api = gl::g_current_gl_context; + GLuint service_id = 0; + api->glGenTexturesFn(1, &service_id); + gl::ScopedTextureBinder texture_binder(target, service_id); + + api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + glEGLImageTargetTexture2DOES(target, image); + + return service_id; +} + +} // namespace + +class OHNativeBufferImageBacking::GLRepresentationShared { + public: + GLRepresentationShared( + OHNativeBufferImageBacking* backing, + gl::ScopedEGLImage egl_image, + gles2::Texture* texture, + scoped_refptr texture_passthrough) + : backing_(backing), + egl_image_(std::move(egl_image)), + texture_(texture), + texture_passthrough_(std::move(texture_passthrough)) {} + + GLRepresentationShared(const GLRepresentationShared&) = delete; + GLRepresentationShared& operator=(const GLRepresentationShared&) = delete; + + ~GLRepresentationShared() { + EndAccess(); + bool have_context = backing_->have_context(); + if (!have_context) { + if (texture_passthrough_) { + texture_passthrough_->MarkContextLost(); + } + } + if (texture_) { + texture_.ExtractAsDangling()->RemoveLightweightRef(have_context); + } + } + + gles2::Texture* texture() { return texture_; } + const scoped_refptr& texture_passthrough() const { + return texture_passthrough_; + } + + bool BeginAccess(GLenum mode) { + if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) { + if 
(!backing_->BeginRead(this)) { + return false; + } + mode_ = RepresentationAccessMode::kRead; + } else if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) { + if (!backing_->BeginWrite()) { + return false; + } + mode_ = RepresentationAccessMode::kWrite; + } else { + NOTREACHED(); + } + return true; + } + + void EndAccess() { + if (mode_ == RepresentationAccessMode::kNone) { + return; + } + + // Pass this fence to its backing. + if (mode_ == RepresentationAccessMode::kRead) { + backing_->EndRead(this); + } else if (mode_ == RepresentationAccessMode::kWrite) { + backing_->EndWrite(); + } else { + NOTREACHED(); + } + mode_ = RepresentationAccessMode::kNone; + } + + private: + const raw_ptr backing_; + gl::ScopedEGLImage egl_image_; + raw_ptr texture_ = nullptr; + const scoped_refptr texture_passthrough_; + RepresentationAccessMode mode_ = RepresentationAccessMode::kNone; +}; + +class OHNativeBufferImageBacking::GLTextureOHNBImageRepresentation + : public GLTextureImageRepresentation { + public: + GLTextureOHNBImageRepresentation(SharedImageManager* manager, + OHNativeBufferImageBacking* backing, + MemoryTypeTracker* tracker, + gl::ScopedEGLImage egl_image, + gles2::Texture* texture) + : GLTextureImageRepresentation(manager, backing, tracker), + shared_(backing, std::move(egl_image), texture, nullptr) {} + + GLTextureOHNBImageRepresentation(const GLTextureOHNBImageRepresentation&) = + delete; + GLTextureOHNBImageRepresentation& operator=( + const GLTextureOHNBImageRepresentation&) = delete; + + ~GLTextureOHNBImageRepresentation() override = default; + + gles2::Texture* GetTexture(int plane_index) override { + DCHECK_EQ(plane_index, 0); + return shared_.texture(); + } + + bool BeginAccess(GLenum mode) override { return shared_.BeginAccess(mode); } + void EndAccess() override { shared_.EndAccess(); } + + private: + GLRepresentationShared shared_; +}; + +class OHNativeBufferImageBacking::GLTexturePassthroughOHNBImageRepresentation + : public GLTexturePassthroughImageRepresentation { + public: + GLTexturePassthroughOHNBImageRepresentation( + SharedImageManager* manager, + OHNativeBufferImageBacking* backing, + MemoryTypeTracker* tracker, + gl::ScopedEGLImage egl_image, + scoped_refptr texture) + : GLTexturePassthroughImageRepresentation(manager, backing, tracker), + shared_(backing, std::move(egl_image), nullptr, std::move(texture)) {} + + GLTexturePassthroughOHNBImageRepresentation( + const GLTexturePassthroughOHNBImageRepresentation&) = delete; + GLTexturePassthroughOHNBImageRepresentation& operator=( + const GLTexturePassthroughOHNBImageRepresentation&) = delete; + + ~GLTexturePassthroughOHNBImageRepresentation() override = default; + + const scoped_refptr& GetTexturePassthrough( + int plane_index) override { + DCHECK_EQ(plane_index, 0); + return shared_.texture_passthrough(); + } + + bool BeginAccess(GLenum mode) override { return shared_.BeginAccess(mode); } + void EndAccess() override { shared_.EndAccess(); } + + private: + GLRepresentationShared shared_; +}; + +class OHNativeBufferImageBacking::OverlayOHNBImageRepresentation + : public OverlayImageRepresentation { + public: + OverlayOHNBImageRepresentation(SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker) + : OverlayImageRepresentation(manager, backing, tracker) {} + + ~OverlayOHNBImageRepresentation() override = default; + + private: + OHNativeBufferImageBacking* ohnb_backing() const { + return static_cast(backing()); + } + + bool BeginReadAccess(gfx::GpuFenceHandle& acquire_fence) override { 
+ return ohnb_backing()->BeginOverlayAccess(); + } + + void EndReadAccess(gfx::GpuFenceHandle release_fence) override { + DCHECK(release_fence.is_null()); + ohnb_backing()->EndOverlayAccess(); + } + + scoped_refptr GetNativeBuffer() const override { + return ohnb_backing()->GetNativeBuffer(); + } +}; + +OHNativeBufferImageBacking::OHNativeBufferImageBacking( + const Mailbox& mailbox, + viz::SharedImageFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + scoped_refptr buffer, + size_t estimated_size, + bool is_thread_safe, + bool use_passthrough) + : ClearTrackingSharedImageBacking(mailbox, + format, + size, + color_space, + surface_origin, + alpha_type, + usage, + estimated_size, + is_thread_safe), + buffer_(std::move(buffer)), + use_passthrough_(use_passthrough) { + DCHECK(buffer_ && buffer_->is_valid()); +} + +OHNativeBufferImageBacking::~OHNativeBufferImageBacking() { + // Locking here in destructor since we are accessing member variable + // |have_context_| via have_context(). + AutoLock auto_lock(this); +} + +SharedImageBackingType OHNativeBufferImageBacking::GetType() const { + return SharedImageBackingType::kOHNativeBuffer; +} + +void OHNativeBufferImageBacking::Update( + std::unique_ptr in_fence) { + DCHECK(!in_fence); + NOTREACHED(); +} + +std::unique_ptr +OHNativeBufferImageBacking::ProduceGLTexture(SharedImageManager* manager, + MemoryTypeTracker* tracker) { + // Use same texture for all the texture representations generated from same + // backing. + DCHECK(buffer_ && buffer_->is_valid()); + + auto egl_image = CreateEGLImageFromNativeBufferHandle( + buffer_->handle().CloneAsClientBuffer()); + if (!egl_image.is_valid()) { + return nullptr; + } + + // OpenHarmony documentation states that right GL format for RGBX + // OHNativeBuffer is GL_RGB8, so we don't use angle rgbx. + auto gl_format_desc = ToGLFormatDesc(format(), /*plane_index=*/0, + /*use_angle_rgbx_format=*/false); + GLuint service_id = + CreateAndBindTexture(egl_image.get(), gl_format_desc.target); + + auto* texture = + gles2::CreateGLES2TextureWithLightRef(service_id, gl_format_desc.target); + texture->SetLevelInfo(gl_format_desc.target, 0, + gl_format_desc.image_internal_format, size().width(), + size().height(), 1, 0, gl_format_desc.data_format, + gl_format_desc.data_type, ClearedRect()); + texture->SetImmutable(true, false); + + return std::make_unique( + manager, this, tracker, std::move(egl_image), std::move(texture)); +} + +std::unique_ptr +OHNativeBufferImageBacking::ProduceGLTexturePassthrough( + SharedImageManager* manager, + MemoryTypeTracker* tracker) { + // Use same texture for all the texture representations generated from same + // backing. + DCHECK(buffer_ && buffer_->is_valid()); + + auto egl_image = CreateEGLImageFromNativeBufferHandle( + buffer_->handle().CloneAsClientBuffer()); + if (!egl_image.is_valid()) { + return nullptr; + } + + // OpenHarmony documentation states that right GL format for RGBX + // OHNativeBuffer is GL_RGB8, so we don't use angle rgbx. 
+ auto gl_format_desc = ToGLFormatDesc(format(), /*plane_index=*/0, + /*use_angle_rgbx_format=*/false); + GLuint service_id = + CreateAndBindTexture(egl_image.get(), gl_format_desc.target); + + auto texture = base::MakeRefCounted( + service_id, gl_format_desc.target); + texture->SetEstimatedSize(GetEstimatedSize()); + + return std::make_unique( + manager, this, tracker, std::move(egl_image), std::move(texture)); +} + +std::unique_ptr +OHNativeBufferImageBacking::ProduceSkiaGanesh( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr context_state) { + DCHECK(context_state); + + std::unique_ptr gl_representation; + if (use_passthrough_) { + gl_representation = ProduceGLTexturePassthrough(manager, tracker); + } else { + gl_representation = ProduceGLTexture(manager, tracker); + } + + if (!gl_representation) { + LOG(ERROR) << "Unable produce gl texture!"; + return nullptr; + } + + return SkiaGLImageRepresentation::Create(std::move(gl_representation), + std::move(context_state), manager, + this, tracker); +} + +std::unique_ptr +OHNativeBufferImageBacking::ProduceOverlay(SharedImageManager* manager, + MemoryTypeTracker* tracker) { + return std::make_unique(manager, this, + tracker); +} + +bool OHNativeBufferImageBacking::BeginWrite() { + AutoLock auto_lock(this); + + if (is_writing_ || !active_readers_.empty() || is_overlay_accessing_) { + DLOG(ERROR) << "BeginWrite should only be called when there are no other " + "readers or writers"; + return false; + } + + is_writing_ = true; + + // When multiple threads wants to write to the same backing, writer needs to + // wait on previous reads and writes to be finished. + if (!read_fences_.empty()) { + for (const auto& read_fence : read_fences_) { + read_fence.second->ServerWait(); + } + // Once all the read fences have been waited upon, its safe to clear all of + // them. Note that when there is an active writer, no one can read and hence + // can not update |read_fences_|. 
+ read_fences_.clear(); + } + + if (write_fence_) { + write_fence_->ServerWait(); + } + + std::unique_ptr acquire_fence; + if (!buffer_->BeginAccess(acquire_fence)) { + return false; + } + + if (acquire_fence) { + acquire_fence->Wait(); + } + + return true; +} + +void OHNativeBufferImageBacking::EndWrite() { + AutoLock auto_lock(this); + + if (!is_writing_) { + DLOG(ERROR) << "Attempt to end write to a SharedImageBacking without a " + "successful begin write"; + return; + } + + is_writing_ = false; + write_fence_ = gl::GLFenceEGL::Create(); +} + +bool OHNativeBufferImageBacking::BeginRead( + const GLRepresentationShared* reader) { + AutoLock auto_lock(this); + + if (is_writing_) { + DLOG(ERROR) << "BeginRead should only be called when there are no writers"; + return false; + } + + if (active_readers_.contains(reader)) { + DLOG(ERROR) << "BeginRead was called twice on the same representation"; + return false; + } + + active_readers_.insert(reader); + if (write_fence_) { + write_fence_->ServerWait(); + } + + return true; +} + +void OHNativeBufferImageBacking::EndRead(const GLRepresentationShared* reader) { + AutoLock auto_lock(this); + + if (!active_readers_.contains(reader)) { + DLOG(ERROR) << "Attempt to end read to a SharedImageBacking without a " + "successful begin read"; + return; + } + active_readers_.erase(reader); + + read_fences_[gl::g_current_gl_context] = + base::MakeRefCounted(); +} + +bool OHNativeBufferImageBacking::BeginOverlayAccess() { + AutoLock auto_lock(this); + + DCHECK(!is_overlay_accessing_); + + if (is_writing_) { + LOG(ERROR) + << "BeginOverlayAccess should only be called when there are no writers"; + return false; + } + + if (write_fence_) { + write_fence_->ServerWait(); + } + + is_overlay_accessing_ = true; + return true; +} + +void OHNativeBufferImageBacking::EndOverlayAccess() { + AutoLock auto_lock(this); + + DCHECK(is_overlay_accessing_); + is_overlay_accessing_ = false; +} + +scoped_refptr OHNativeBufferImageBacking::GetNativeBuffer() + const { + AutoLock auto_lock(this); + return buffer_; +} + +} // namespace gpu diff --git a/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.h b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.h new file mode 100644 index 0000000000..370132a098 --- /dev/null +++ b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.h @@ -0,0 +1,108 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_OH_NATIVE_BUFFER_IMAGE_BACKING_H_ +#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_OH_NATIVE_BUFFER_IMAGE_BACKING_H_ + +#include "base/containers/flat_set.h" +#include "base/memory/scoped_refptr.h" +#include "gpu/command_buffer/service/shared_image/shared_image_backing.h" +#include "ui/gl/gl_bindings.h" + +namespace gfx { +class NativeBuffer; +} // namespace gfx + +namespace gl { +class GLFenceEGL; +class SharedGLFenceEGL; +} // namespace gl + +namespace gpu { + +// Implementation of SharedImageBacking that holds an OHNativeBuffer. This +// can be used to create a GL texture from the OHNativeBuffer backing. 
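+//
+// Access to the backing is synchronized with EGL fences: a writer waits on
+// all outstanding read fences and on the last write fence, while readers and
+// overlay access wait on the last write fence. Illustrative sketch of the
+// intended access pattern (not an actual call site):
+//
+//   if (backing->BeginWrite()) {  // waits on pending read/write fences
+//     // ... render into the GL texture ...
+//     backing->EndWrite();        // installs a write fence for readers
+//   }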
+class OHNativeBufferImageBacking : public ClearTrackingSharedImageBacking { + public: + OHNativeBufferImageBacking(const Mailbox& mailbox, + viz::SharedImageFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + scoped_refptr buffer, + size_t estimated_size, + bool is_thread_safe, + bool use_passthrough); + + ~OHNativeBufferImageBacking() override; + OHNativeBufferImageBacking(const OHNativeBufferImageBacking&) = delete; + OHNativeBufferImageBacking& operator=(const OHNativeBufferImageBacking&) = + delete; + + // SharedImageBacking implementation. + SharedImageBackingType GetType() const override; + void Update(std::unique_ptr in_fence) override; + + protected: + std::unique_ptr ProduceGLTexture( + SharedImageManager* manager, + MemoryTypeTracker* tracker) override; + + std::unique_ptr + ProduceGLTexturePassthrough(SharedImageManager* manager, + MemoryTypeTracker* tracker) override; + + std::unique_ptr ProduceSkiaGanesh( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr context_state) override; + + std::unique_ptr ProduceOverlay( + SharedImageManager* manager, + MemoryTypeTracker* tracker) override; + + private: + class GLRepresentationShared; + class GLTextureOHNBImageRepresentation; + class GLTexturePassthroughOHNBImageRepresentation; + class OverlayOHNBImageRepresentation; + + bool BeginWrite(); + void EndWrite(); + bool BeginRead(const GLRepresentationShared* reader); + void EndRead(const GLRepresentationShared* reader); + + bool BeginOverlayAccess(); + void EndOverlayAccess(); + + scoped_refptr GetNativeBuffer() const; + + scoped_refptr buffer_; + + // All reads and writes must wait for exiting writes to complete. + // TODO(vikassoni): Use SharedGLFenceEGL here instead of GLFenceEGL here in + // future for |write_fence_| once the SharedGLFenceEGL has the capability to + // support multiple GLContexts. + std::unique_ptr write_fence_ GUARDED_BY(lock_); + bool is_writing_ GUARDED_BY(lock_) = false; + + // All writes must wait for existing reads to complete. For a given GL + // context, we only need to keep the most recent fence. Waiting on the most + // recent read fence is enough to make sure all past read fences have been + // signalled. + base::flat_map> read_fences_ + GUARDED_BY(lock_); + base::flat_set active_readers_ + GUARDED_BY(lock_); + + bool is_overlay_accessing_ GUARDED_BY(lock_) = false; + + const bool use_passthrough_; +}; + +} // namespace gpu + +#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_OH_NATIVE_BUFFER_IMAGE_BACKING_H_ diff --git a/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.cc b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.cc new file mode 100644 index 0000000000..be786dcaba --- /dev/null +++ b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.cc @@ -0,0 +1,236 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.h" + +#include "base/feature_list.h" +#include "base/logging.h" +#include "components/viz/common/resources/resource_format_utils.h" +#include "gpu/command_buffer/common/shared_image_usage.h" +#include "gpu/command_buffer/service/feature_info.h" +#include "gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing.h" +#include "gpu/command_buffer/service/shared_image/shared_image_format_utils.h" +#include "ui/gfx/ohos/native_buffer.h" +#include "ui/gfx/ohos/oh_native_surface_factory.h" +#include "ui/gl/gl_bindings.h" +#include "ui/gl/gl_context.h" +#include "ui/gl/gl_utils.h" + +namespace gpu { + +namespace { + +gfx::BufferUsage GetBufferUsage(uint32_t usage, bool cpu_read_write) { + if (usage & SHARED_IMAGE_USAGE_SCANOUT) { + return cpu_read_write ? gfx::BufferUsage::SCANOUT_CPU_READ_WRITE + : gfx::BufferUsage::SCANOUT; + } else { + return cpu_read_write ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE + : gfx::BufferUsage::GPU_READ; + } +} + +// See OutputPresenterGL::kDefaultSharedImageUsage. +constexpr uint32_t kPrimaryPlaneUsage = + SHARED_IMAGE_USAGE_DISPLAY_READ | SHARED_IMAGE_USAGE_DISPLAY_WRITE | + SHARED_IMAGE_USAGE_SCANOUT | SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT; + +constexpr uint32_t kSupportedUsage = + SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT | + SHARED_IMAGE_USAGE_DISPLAY_WRITE | SHARED_IMAGE_USAGE_DISPLAY_READ | + SHARED_IMAGE_USAGE_RASTER | SHARED_IMAGE_USAGE_OOP_RASTERIZATION | + SHARED_IMAGE_USAGE_SCANOUT | SHARED_IMAGE_USAGE_HIGH_PERFORMANCE_GPU; + +} // namespace + +OHNativeBufferImageBackingFactory::OHNativeBufferImageBackingFactory( + const gles2::FeatureInfo* feature_info, + const GpuPreferences& gpu_preferences) + : SharedImageBackingFactory(kSupportedUsage), + use_passthrough_(gpu_preferences.use_passthrough_cmd_decoder && + gl::PassthroughCommandDecoderSupported()) { + DCHECK(gfx::OHNativeSurfaceFactory::SupportsNativeBuffers()); + + DCHECK(gl::g_current_gl_driver->ext.b_GL_OES_EGL_image); + + gl::GLApi* api = gl::g_current_gl_context; + api->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &max_gl_texture_size_); + // Ensure max_texture_size_ is less than INT_MAX so that gfx::Rect and friends + // can be used to accurately represent all valid sub-rects, with overflow + // cases, clamped to INT_MAX, always invalid. 
+ max_gl_texture_size_ = std::min(max_gl_texture_size_, INT_MAX - 1); + + auto gpu_memory_buffer_formats = + feature_info->feature_flags().gpu_memory_buffer_formats; + auto* factory = gfx::OHNativeSurfaceFactory::GetInstance(); + + for (gfx::BufferFormat buffer_format : gpu_memory_buffer_formats) { + if (factory->CanCreateNativeBufferForFormat(buffer_format)) { + viz::SharedImageFormat format = viz::SharedImageFormat::SinglePlane( + viz::GetResourceFormat(buffer_format)); + supported_formats_.insert(format); + } + } +} + +OHNativeBufferImageBackingFactory::~OHNativeBufferImageBackingFactory() = + default; + +// static +bool OHNativeBufferImageBackingFactory::IsOHNBSharedImageSupported( + const GpuPreferences& gpu_preferences) { + const bool supports_native_buffers = + gfx::OHNativeSurfaceFactory::SupportsNativeBuffers(); + const bool is_skia_gl = gpu_preferences.gr_context_type == GrContextType::kGL; + return supports_native_buffers && is_skia_gl; +} + +std::unique_ptr +OHNativeBufferImageBackingFactory::CreateSharedImage( + const Mailbox& mailbox, + viz::SharedImageFormat format, + SurfaceHandle surface_handle, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label, + bool is_thread_safe) { + return CreateSharedImageInternal( + mailbox, format, surface_handle, size, color_space, surface_origin, + alpha_type, usage, is_thread_safe, base::span()); +} + +std::unique_ptr +OHNativeBufferImageBackingFactory::CreateSharedImage( + const Mailbox& mailbox, + viz::SharedImageFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label, + base::span pixel_data) { + NOTREACHED(); + return nullptr; +} + +std::unique_ptr +OHNativeBufferImageBackingFactory::CreateSharedImage( + const Mailbox& mailbox, + gfx::GpuMemoryBufferHandle handle, + gfx::BufferFormat buffer_format, + gfx::BufferPlane plane, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label) { + NOTREACHED(); + return nullptr; +} + +bool OHNativeBufferImageBackingFactory::IsSupported( + uint32_t usage, + viz::SharedImageFormat format, + const gfx::Size& size, + bool thread_safe, + gfx::GpuMemoryBufferType gmb_type, + GrContextType gr_context_type, + base::span pixel_data) { + if (format.is_multi_plane()) { + return false; + } + + if (gmb_type != gfx::EMPTY_BUFFER) { + return false; + } + + if (gr_context_type != GrContextType::kGL) { + return false; + } + + // For now just use for primary plane buffers. + if (usage != kPrimaryPlaneUsage) { + return false; + } + + if (!pixel_data.empty()) { + return false; + } + + if (!base::Contains(supported_formats_, format)) { + return false; + } + + return true; +} + +bool OHNativeBufferImageBackingFactory::ValidateUsage( + uint32_t usage, + const gfx::Size& size, + viz::SharedImageFormat format) const { + if (!base::Contains(supported_formats_, format)) { + LOG(ERROR) << "viz::SharedImageFormat " << format.ToString() + << " not supported by native buffer"; + return false; + } + + // Check against the current size restrictions. 
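+ // The buffer is imported as a GL texture, so both dimensions must also stay
+ // within the GL_MAX_TEXTURE_SIZE value queried when the factory was created.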
+ if (size.width() < 1 || size.height() < 1 || + size.width() > max_gl_texture_size_ || + size.height() > max_gl_texture_size_) { + LOG(ERROR) << "CreateSharedImage: invalid size=" << size.ToString() + << " max_gl_texture_size=" << max_gl_texture_size_; + return false; + } + + return true; +} + +std::unique_ptr +OHNativeBufferImageBackingFactory::CreateSharedImageInternal( + const Mailbox& mailbox, + viz::SharedImageFormat format, + SurfaceHandle surface_handle, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + bool is_thread_safe, + base::span pixel_data) { + DCHECK(!format.IsCompressed()); + + if (!ValidateUsage(usage, size, format)) { + return nullptr; + } + + // Calculate SharedImage size in bytes. + auto estimated_size = format.MaybeEstimatedSizeInBytes(size); + if (!estimated_size) { + LOG(ERROR) << "Failed to calculate SharedImage size"; + return nullptr; + } + + gfx::BufferUsage buffer_usage = GetBufferUsage(usage, !pixel_data.empty()); + + auto* factory = gfx::OHNativeSurfaceFactory::GetInstance(); + scoped_refptr buffer = factory->CreateNativeBuffer( + surface_handle, size, ToBufferFormat(format), buffer_usage); + if (!buffer) { + LOG(ERROR) << "Failed to create native buffer"; + return nullptr; + } + + return std::make_unique( + mailbox, format, size, color_space, surface_origin, alpha_type, usage, + std::move(buffer), estimated_size.value(), is_thread_safe, + use_passthrough_); +} + +} // namespace gpu diff --git a/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.h b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.h new file mode 100644 index 0000000000..b7027d7b4a --- /dev/null +++ b/gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.h @@ -0,0 +1,115 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_OH_NATIVE_BUFFER_IMAGE_BACKING_FACTORY_H_ +#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_OH_NATIVE_BUFFER_IMAGE_BACKING_FACTORY_H_ + +#include "base/containers/flat_set.h" +#include "gpu/command_buffer/service/shared_image/shared_image_backing_factory.h" +#include "gpu/gpu_gles2_export.h" + +namespace gfx { +class Size; +class ColorSpace; +} // namespace gfx + +namespace gpu { + +namespace gles2 { +class FeatureInfo; +} // namespace gles2 + +class SharedImageBacking; +struct Mailbox; + +// Implementation of SharedImageBackingFactory that produces OHNativeBuffer +// backed SharedImages. This is meant to be used on OpenHarmony only. +class GPU_GLES2_EXPORT OHNativeBufferImageBackingFactory + : public SharedImageBackingFactory { + public: + explicit OHNativeBufferImageBackingFactory( + const gles2::FeatureInfo* feature_info, + const GpuPreferences& gpu_preferences); + + OHNativeBufferImageBackingFactory(const OHNativeBufferImageBackingFactory&) = + delete; + OHNativeBufferImageBackingFactory& operator=( + const OHNativeBufferImageBackingFactory&) = delete; + + ~OHNativeBufferImageBackingFactory() override; + + // Returns true if OHNB shared images are supported and this factory should be + // used. + static bool IsOHNBSharedImageSupported(const GpuPreferences& gpu_preferences); + + // SharedImageBackingFactory implementation. 
+ std::unique_ptr CreateSharedImage( + const Mailbox& mailbox, + viz::SharedImageFormat format, + SurfaceHandle surface_handle, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label, + bool is_thread_safe) override; + std::unique_ptr CreateSharedImage( + const Mailbox& mailbox, + viz::SharedImageFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label, + base::span pixel_data) override; + std::unique_ptr CreateSharedImage( + const Mailbox& mailbox, + gfx::GpuMemoryBufferHandle handle, + gfx::BufferFormat format, + gfx::BufferPlane plane, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + std::string debug_label) override; + bool IsSupported(uint32_t usage, + viz::SharedImageFormat format, + const gfx::Size& size, + bool thread_safe, + gfx::GpuMemoryBufferType gmb_type, + GrContextType gr_context_type, + base::span pixel_data) override; + bool IsFormatSupported(viz::SharedImageFormat format); + + private: + bool ValidateUsage(uint32_t usage, + const gfx::Size& size, + viz::SharedImageFormat format) const; + + std::unique_ptr CreateSharedImageInternal( + const Mailbox& mailbox, + viz::SharedImageFormat format, + SurfaceHandle surface_handle, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + GrSurfaceOrigin surface_origin, + SkAlphaType alpha_type, + uint32_t usage, + bool is_thread_safe, + base::span pixel_data); + + base::flat_set supported_formats_; + + // Used to limit the max size of native buffer. + int32_t max_gl_texture_size_ = 0; + + const bool use_passthrough_; +}; + +} // namespace gpu + +#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_OH_NATIVE_BUFFER_IMAGE_BACKING_FACTORY_H_ diff --git a/gpu/command_buffer/service/shared_image/shared_image_backing.cc b/gpu/command_buffer/service/shared_image/shared_image_backing.cc index f2f780e463..e1cb3f49c4 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_backing.cc +++ b/gpu/command_buffer/service/shared_image/shared_image_backing.cc @@ -59,6 +59,10 @@ const char* BackingTypeToString(SharedImageBackingType type) { return "DCompSurface"; case SharedImageBackingType::kDXGISwapChain: return "DXGISwapChain"; +#if BUILDFLAG(IS_OHOS) + case SharedImageBackingType::kOHNativeBuffer: + return "OHNativeBufferImageBacking"; +#endif } NOTREACHED(); } diff --git a/gpu/command_buffer/service/shared_image/shared_image_backing.h b/gpu/command_buffer/service/shared_image/shared_image_backing.h index ec06bb61b0..e71f55aa9e 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_backing.h +++ b/gpu/command_buffer/service/shared_image/shared_image_backing.h @@ -86,7 +86,12 @@ enum class SharedImageBackingType { kIOSurface = 15, kDCompSurface = 16, kDXGISwapChain = 17, +#if BUILDFLAG(IS_OHOS) + kOHNativeBuffer = 18, + kMaxValue = kOHNativeBuffer +#else kMaxValue = kDXGISwapChain +#endif }; #if BUILDFLAG(IS_WIN) diff --git a/gpu/command_buffer/service/shared_image/shared_image_factory.cc b/gpu/command_buffer/service/shared_image/shared_image_factory.cc index bb2525aeb7..66edca54de 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_factory.cc +++ b/gpu/command_buffer/service/shared_image/shared_image_factory.cc @@ -78,6 +78,7 @@ #endif // defined(USE_EGL) #if BUILDFLAG(IS_OHOS) 
+#include "gpu/command_buffer/service/shared_image/oh_native_buffer_image_backing_factory.h" #include "gpu/command_buffer/service/shared_image/shared_image_pool.h" #endif @@ -267,6 +268,13 @@ SharedImageFactory::SharedImageFactory( std::make_unique(context_state); factories_.push_back(std::move(external_vk_image_factory)); } +#elif BUILDFLAG(IS_OHOS) + if (OHNativeBufferImageBackingFactory::IsOHNBSharedImageSupported( + gpu_preferences)) { + auto ohnb_factory = std::make_unique( + feature_info.get(), gpu_preferences); + factories_.push_back(std::move(ohnb_factory)); + } #elif BUILDFLAG(IS_OZONE) // For all Ozone platforms - Desktop Linux, ChromeOS, Fuchsia, CastOS. if (ui::OzonePlatform::GetInstance() diff --git a/gpu/command_buffer/service/shared_image/shared_image_manager.cc b/gpu/command_buffer/service/shared_image/shared_image_manager.cc index b8255939ab..467db57892 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_manager.cc +++ b/gpu/command_buffer/service/shared_image/shared_image_manager.cc @@ -37,6 +37,7 @@ #if BUILDFLAG(IS_OHOS) #include "gpu/command_buffer/service/shared_image/shared_image_pool.h" +#include "ui/gfx/ohos/oh_native_surface_factory.h" #endif #if DCHECK_IS_ON() @@ -520,6 +521,8 @@ bool SharedImageManager::SupportsScanoutImages() { return true; #elif BUILDFLAG(IS_ANDROID) return base::AndroidHardwareBufferCompat::IsSupportAvailable(); +#elif BUILDFLAG(IS_OHOS) + return gfx::OHNativeSurfaceFactory::SupportsNativeBuffers(); #elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) return ui::OzonePlatform::GetInstance() ->GetPlatformRuntimeProperties() diff --git a/gpu/command_buffer/service/shared_image/shared_image_representation.cc b/gpu/command_buffer/service/shared_image/shared_image_representation.cc index 6d08caebd6..d456d9acad 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_representation.cc +++ b/gpu/command_buffer/service/shared_image/shared_image_representation.cc @@ -645,6 +645,12 @@ OverlayImageRepresentation::GetAHardwareBufferFenceSync() { NOTREACHED(); return nullptr; } +#elif BUILDFLAG(IS_OHOS) +scoped_refptr +OverlayImageRepresentation::GetNativeBuffer() const { + NOTREACHED(); + return nullptr; +} #elif BUILDFLAG(IS_OZONE) scoped_refptr OverlayImageRepresentation::GetNativePixmap() { return backing()->GetNativePixmap(); diff --git a/gpu/command_buffer/service/shared_image/shared_image_representation.h b/gpu/command_buffer/service/shared_image/shared_image_representation.h index 9c89e0d736..1899a5a2cf 100644 --- a/gpu/command_buffer/service/shared_image/shared_image_representation.h +++ b/gpu/command_buffer/service/shared_image/shared_image_representation.h @@ -40,6 +40,10 @@ extern "C" typedef struct AHardwareBuffer AHardwareBuffer; #endif +#if BUILDFLAG(IS_OHOS) +#include "ui/gfx/ohos/native_buffer.h" +#endif + #if BUILDFLAG(IS_WIN) #include #include @@ -775,6 +779,10 @@ class GPU_GLES2_EXPORT OverlayImageRepresentation GetAHardwareBufferFenceSync() { return representation()->GetAHardwareBufferFenceSync(); } +#elif BUILDFLAG(IS_OHOS) + scoped_refptr GetNativeBuffer() const { + return representation()->GetNativeBuffer(); + } #elif BUILDFLAG(IS_OZONE) scoped_refptr GetNativePixmap() { return representation()->GetNativePixmap(); @@ -828,6 +836,8 @@ class GPU_GLES2_EXPORT OverlayImageRepresentation virtual AHardwareBuffer* GetAHardwareBuffer(); virtual std::unique_ptr GetAHardwareBufferFenceSync(); +#elif BUILDFLAG(IS_OHOS) + virtual scoped_refptr GetNativeBuffer() const; #elif BUILDFLAG(IS_OZONE) 
scoped_refptr GetNativePixmap(); #elif BUILDFLAG(IS_WIN) diff --git a/gpu/ipc/service/image_transport_surface_ohos.cc b/gpu/ipc/service/image_transport_surface_ohos.cc index 0242f93b13..fc66529e0c 100644 --- a/gpu/ipc/service/image_transport_surface_ohos.cc +++ b/gpu/ipc/service/image_transport_surface_ohos.cc @@ -1,15 +1,33 @@ -// Copyright 2013 The Chromium Authors +// Copyright 2024 The Chromium Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "gpu/ipc/service/image_transport_surface.h" +#include "base/command_line.h" +#include "base/feature_list.h" +#include "base/logging.h" #include "build/build_config.h" +#include "content/public/common/content_switches.h" +#include "gpu/ipc/common/nweb_native_window_tracker.h" #include "gpu/ipc/service/pass_through_image_transport_surface.h" +#include "third_party/ohos_ndk/includes/ohos_adapter/ohos_adapter_helper.h" +#include "ui/gfx/ohos/oh_native_surface_factory.h" +#include "ui/gfx/ohos/scoped_oh_native_window.h" +#include "ui/gl/gl_surface_egl_buffer_queue_presenter.h" #include "ui/gl/init/gl_factory.h" namespace gpu { +namespace { + +// Use buffer queue surface presenter for output presentation on OpenHarmony. +BASE_FEATURE(kBufferQueueSurfacePresenter, + "BufferQueueSurfacePresenter", + base::FEATURE_ENABLED_BY_DEFAULT); + +} // namespace + // static scoped_refptr ImageTransportSurface::CreatePresenter( gl::GLDisplay* display, @@ -17,7 +35,52 @@ scoped_refptr ImageTransportSurface::CreatePresenter( SurfaceHandle surface_handle, gl::GLSurfaceFormat format) { DCHECK_NE(surface_handle, kNullSurfaceHandle); - return gl::init::CreateSurfacelessViewGLSurface(display, surface_handle); + if (gl::GetGLImplementation() == gl::kGLImplementationMockGL || + gl::GetGLImplementation() == gl::kGLImplementationStubGL) { + return nullptr; + } + + static bool persist_web_buffer_queue_surface_presenter_enabled = + OHOS::NWeb::OhosAdapterHelper::GetInstance() + .GetSystemPropertiesInstance() + .GetBoolParameter("persist.web.buffer_queue_surface_presenter.enable", + false); + if (!persist_web_buffer_queue_surface_presenter_enabled) { + return nullptr; + } + + if (!base::FeatureList::IsEnabled(kBufferQueueSurfacePresenter)) { + return nullptr; + } + + if (base::CommandLine::ForCurrentProcess()->HasSwitch( + switches::kOhosHanceSurface)) { + LOG(INFO) << "Buffer queue surface presenter with enhance surface is not " + "supported"; + return nullptr; + } + + if (!gfx::OHNativeSurfaceFactory::SupportsNativeBuffers()) { + return nullptr; + } + + gfx::ScopedOHNativeWindow scoped_window = gfx::ScopedOHNativeWindow::Wrap( + NWebNativeWindowTracker::GetInstance()->GetNativeWindow(surface_handle)); + if (!scoped_window) { + LOG(WARNING) << "Failed to acquire native window"; + return nullptr; + } + + auto* factory = gfx::OHNativeSurfaceFactory::GetInstance(); + + auto presenter = base::MakeRefCounted( + factory, display->GetAs(), surface_handle, + std::move(scoped_window)); + if (!presenter->Initialize()) { + return nullptr; + } + + return presenter; } // static @@ -33,8 +96,9 @@ scoped_refptr ImageTransportSurface::CreateNativeGLSurface( if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) { override_vsync_for_multi_window_swap = true; } - if (!surface) + if (!surface) { return surface; + } return scoped_refptr(new PassThroughImageTransportSurface( delegate, surface.get(), override_vsync_for_multi_window_swap)); } diff --git a/ui/gfx/BUILD.gn b/ui/gfx/BUILD.gn index 87becd0fc8..8a59fc0b40 
100644 --- a/ui/gfx/BUILD.gn +++ b/ui/gfx/BUILD.gn @@ -18,6 +18,10 @@ if (is_android) { import("//build/config/android/rules.gni") } +if (is_ohos) { + import("//build/config/ohos/config.gni") +} + # Several targets want to include this header file, and some of them are # child dependencies of "gfx". Therefore, we separate it out here so multiple # targets can all have a dependency for header checking purposes without @@ -160,6 +164,33 @@ component("gfx") { "android/view_configuration.h", ] } + if (is_ohos) { + sources += [ + "ohos/native_buffer_manager.cc", + "ohos/native_buffer_manager.h", + "ohos/native_buffer_queue_cache.cc", + "ohos/native_buffer_queue_cache.h", + "ohos/native_buffer_queue_surface.h", + "ohos/native_buffer_queue.cc", + "ohos/native_buffer_queue.h", + "ohos/native_buffer_utils.cc", + "ohos/native_buffer_utils.h", + "ohos/native_buffer.cc", + "ohos/native_buffer.h", + "ohos/oh_native_buffer_abi.h", + "ohos/oh_native_buffer_compat.cc", + "ohos/oh_native_buffer_compat.h", + "ohos/oh_native_surface_factory.cc", + "ohos/oh_native_surface_factory.h", + "ohos/oh_native_window_abi.h", + "ohos/oh_native_window_compat.cc", + "ohos/oh_native_window_compat.h", + "ohos/scoped_native_buffer_handle.cc", + "ohos/scoped_native_buffer_handle.h", + "ohos/scoped_oh_native_window.cc", + "ohos/scoped_oh_native_window.h", + ] + } if (is_linux || is_chromeos) { sources += [ "font_fallback_linux.cc", @@ -348,6 +379,13 @@ component("gfx") { ] } + if (is_ohos) { + if (!is_debug) { + configs -= [ "//build/config/compiler:default_optimization" ] + configs += [ "//build/config/compiler:optimize_max" ] + } + } + if (use_blink) { if (is_android || is_fuchsia || is_ios || is_ohos) { sources += [ diff --git a/ui/gfx/frame_data.h b/ui/gfx/frame_data.h index a129402fa9..3afcb910df 100644 --- a/ui/gfx/frame_data.h +++ b/ui/gfx/frame_data.h @@ -20,6 +20,9 @@ struct FrameData { // point). This may happen for some cases, like the ozone demo, tests, or // users of GLSurface other than SkiaRenderer. int64_t seq = -1; + + // Whether this frame is created locally inside viz compositor. 
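+ // Such frames are produced by viz itself rather than submitted by a client
+ // renderer; a presenter may choose not to send them to the display (see the
+ // DontPresentLocalFrames feature).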
+ bool local_frame = false; }; } // namespace gfx diff --git a/ui/gfx/gpu_fence.cc b/ui/gfx/gpu_fence.cc index 77ca117a46..807295393d 100644 --- a/ui/gfx/gpu_fence.cc +++ b/ui/gfx/gpu_fence.cc @@ -9,7 +9,8 @@ #include "base/time/time.h" #include "build/build_config.h" -#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) +#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \ + BUILDFLAG(IS_OHOS) #include #endif @@ -42,7 +43,8 @@ void GpuFence::Wait() { return; } -#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) +#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \ + BUILDFLAG(IS_OHOS) static const int kInfiniteSyncWaitTimeout = -1; DCHECK_GE(fence_handle_.owned_fd.get(), 0); if (sync_wait(fence_handle_.owned_fd.get(), kInfiniteSyncWaitTimeout) < 0) { @@ -57,7 +59,8 @@ void GpuFence::Wait() { GpuFence::FenceStatus GpuFence::GetStatusChangeTime(int fd, base::TimeTicks* time) { DCHECK_NE(fd, -1); -#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) +#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \ + BUILDFLAG(IS_OHOS) auto info = std::unique_ptr{ sync_fence_info(fd), sync_fence_info_free}; @@ -89,7 +92,8 @@ GpuFence::FenceStatus GpuFence::GetStatusChangeTime(int fd, base::TimeTicks GpuFence::GetMaxTimestamp() const { base::TimeTicks timestamp; -#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) +#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \ + BUILDFLAG(IS_OHOS) FenceStatus status = GetStatusChangeTime(fence_handle_.owned_fd.get(), ×tamp); DCHECK_EQ(status, FenceStatus::kSignaled); diff --git a/ui/gfx/ohos/native_buffer.cc b/ui/gfx/ohos/native_buffer.cc new file mode 100644 index 0000000000..c5c731e221 --- /dev/null +++ b/ui/gfx/ohos/native_buffer.cc @@ -0,0 +1,74 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "ui/gfx/ohos/native_buffer.h" + +#include "base/logging.h" +#include "ui/gfx/ohos/native_buffer_utils.h" + +namespace gfx { + +NativeBuffer::NativeBuffer(ScopedNativeBufferHandle handle, + const gfx::Size& size, + bool is_primary_plane, + base::WeakPtr delegate) + : handle_(std::move(handle)), + size_(size), + is_primary_plane_(is_primary_plane), + delegate_(std::move(delegate)), + sequence_number_(GetNativeBufferSequenceNumber(handle_)) { + DCHECK(handle_.is_valid()); + DCHECK_NE(sequence_number_, kInvalidNativeBufferSequenceNumber); +} + +NativeBuffer::~NativeBuffer() { + Destroy(); +} + +bool NativeBuffer::BeginAccess(std::unique_ptr& acquire_fence) { + if (!is_primary_plane_) { + return true; + } + + if (is_accessing_) { + return true; + } + + is_accessing_ = true; + return delegate_ ? delegate_->OnBeginAccess(this, acquire_fence) : false; +} + +bool NativeBuffer::EndAccess() { + if (!is_primary_plane_) { + return true; + } + + if (!is_accessing_) { + DLOG(ERROR) << "Attempt to end access without a successful begin access " + " seqnum=" + << sequence_number_; + return false; + } + + is_accessing_ = false; + return delegate_ ? 
delegate_->OnEndAccess(this) : true; +} + +void NativeBuffer::Destroy() { + if (!is_primary_plane_) { + return; + } + + if (!delegate_) { + return; + } + + if (is_accessing_) { + delegate_->OnEndAccess(this); + } + + delegate_->OnDestroy(this); +} + +} // namespace gfx diff --git a/ui/gfx/ohos/native_buffer.h b/ui/gfx/ohos/native_buffer.h new file mode 100644 index 0000000000..9270e7b0bd --- /dev/null +++ b/ui/gfx/ohos/native_buffer.h @@ -0,0 +1,71 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_NATIVE_BUFFER_H_ +#define UI_GFX_OHOS_NATIVE_BUFFER_H_ + +#include + +#include "base/memory/ref_counted.h" +#include "base/memory/weak_ptr.h" +#include "ui/gfx/geometry/size.h" +#include "ui/gfx/gfx_export.h" +#include "ui/gfx/gpu_fence.h" +#include "ui/gfx/ohos/scoped_native_buffer_handle.h" + +namespace gfx { + +// This represents a buffer that can be directly imported via GL for +// rendering. +class GFX_EXPORT NativeBuffer + : public base::RefCountedThreadSafe { + public: + class Delegate { + public: + virtual ~Delegate() = default; + + virtual bool OnBeginAccess( + gfx::NativeBuffer* buffer, + std::unique_ptr& acquire_fence) = 0; + virtual bool OnEndAccess(gfx::NativeBuffer* buffer) = 0; + virtual void OnDestroy(gfx::NativeBuffer* buffer) = 0; + }; + + NativeBuffer(ScopedNativeBufferHandle handle, + const gfx::Size& size, + bool is_primary_plane, + base::WeakPtr delegate = nullptr); + + NativeBuffer(const NativeBuffer&) = delete; + NativeBuffer& operator=(const NativeBuffer&) = delete; + + bool BeginAccess(std::unique_ptr& acquire_fence); + bool EndAccess(); + + bool is_valid() const { return handle_.is_valid(); } + + const ScopedNativeBufferHandle& handle() const { return handle_; } + const gfx::Size& size() const { return size_; } + bool is_primary_plane() const { return is_primary_plane_; } + uint32_t sequence_number() const { return sequence_number_; } + + private: + friend class base::RefCountedThreadSafe; + + ~NativeBuffer(); + + void Destroy(); + + const ScopedNativeBufferHandle handle_; + const gfx::Size size_; + const bool is_primary_plane_; + base::WeakPtr delegate_; + const uint32_t sequence_number_; + + bool is_accessing_ = false; +}; + +} // namespace gfx + +#endif // UI_GFX_OHOS_NATIVE_BUFFER_H_ diff --git a/ui/gfx/ohos/native_buffer_manager.cc b/ui/gfx/ohos/native_buffer_manager.cc new file mode 100644 index 0000000000..e2050e39e3 --- /dev/null +++ b/ui/gfx/ohos/native_buffer_manager.cc @@ -0,0 +1,216 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "ui/gfx/ohos/native_buffer_manager.h" + +#include "base/check.h" +#include "base/containers/contains.h" +#include "base/containers/span.h" +#include "base/feature_list.h" +#include "base/logging.h" +#include "base/notreached.h" +#include "base/ranges/algorithm.h" +#include "ui/gfx/buffer_usage_util.h" +#include "ui/gfx/ohos/native_buffer.h" +#include "ui/gfx/ohos/native_buffer_queue_cache.h" +#include "ui/gfx/ohos/oh_native_buffer_compat.h" +#include "ui/gfx/ohos/oh_native_window_compat.h" + +namespace gfx { + +namespace { + +// If enabled, allow to use native buffers. +BASE_FEATURE(kAllowNativeBuffers, + "AllowNativeBuffers", + base::FEATURE_ENABLED_BY_DEFAULT); + +// Allows to store max 2 solid color buffer at the same time. 
+static constexpr size_t kMaxSolidColorBuffers = 2; + +constexpr gfx::BufferFormat kSupportedFormats[2]{gfx::BufferFormat::RGBA_8888, + gfx::BufferFormat::RGBX_8888}; + +// Returns the corresponding NativeBuffer format. +int32_t ToNativeBufferFormat(gfx::BufferFormat format) { + DCHECK(base::Contains(kSupportedFormats, format)); + + if (format == gfx::BufferFormat::RGBA_8888) { + return NATIVEBUFFER_PIXEL_FMT_RGBA_8888; + } else if (format == gfx::BufferFormat::RGBX_8888) { + return NATIVEBUFFER_PIXEL_FMT_RGBX_8888; + } + + NOTREACHED(); + return NATIVEBUFFER_PIXEL_FMT_RGBA_8888; +} + +} // namespace + +// static +bool NativeBufferManager::SupportsNativeBuffers() { + if (base::FeatureList::IsEnabled(kAllowNativeBuffers)) { + return OHNativeBufferCompat::Get().IsSupported() && + OHNativeWindowCompat::Get().IsSupported(); + } + return false; +} + +NativeBufferManager::NativeBufferManager() + : background_native_buffer_queue_cache_( + NativeBufferQueueCache::CreateBackgroundCache()) {} + +NativeBufferManager::~NativeBufferManager() = default; + +bool NativeBufferManager::CanCreateNativeBufferForFormat( + gfx::BufferFormat format) { + return base::Contains(kSupportedFormats, format); +} + +scoped_refptr NativeBufferManager::CreateNativeBuffer( + const gfx::Size& size, + gfx::BufferFormat format, + gfx::BufferUsage usage) { + int32_t native_format = ToNativeBufferFormat(format); + + // Set usage so that gpu can both read as a texture/write as a framebuffer + // attachment. + uint64_t native_usage = NATIVEBUFFER_USAGE_HW_TEXTURE | + NATIVEBUFFER_USAGE_HW_RENDER | + NATIVEBUFFER_USAGE_MEM_DMA; + + switch (usage) { + case gfx::BufferUsage::GPU_READ: + case gfx::BufferUsage::SCANOUT: + break; + + case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE: + case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE: + native_usage |= + NATIVEBUFFER_USAGE_CPU_READ | NATIVEBUFFER_USAGE_CPU_WRITE; + break; + + default: + LOG(ERROR) << "CreateNativeBuffer: unsupported buffer usage " + << gfx::BufferUsageToString(usage); + return nullptr; + } + + auto handle = CreateNativeBuffer(size, native_format, native_usage); + if (!handle.is_valid()) { + return nullptr; + } + + return base::MakeRefCounted(std::move(handle), size, + false /*is_primary_plane*/); +} + +ScopedNativeBufferHandle NativeBufferManager::CreateNativeBuffer( + const gfx::Size& size, + int32_t format, + uint64_t usage) { + OHNativeBuffer_Config config; + config.width = size.width(); + config.height = size.height(); + config.format = format; + config.usage = usage; + + OHNativeBuffer* native_buffer = OHNativeBufferCompat::Get().Allocate(&config); + if (!native_buffer) { + LOG(ERROR) << "Failed to allocate native buffer"; + return ScopedNativeBufferHandle(); + } + + ScopedNativeBufferHandle::Buffer buffer; + buffer.native_buffer = native_buffer; + + return ScopedNativeBufferHandle::Adopt(std::move(buffer)); +} + +ScopedNativeBufferHandle NativeBufferManager::GetOrCreateSolidColorBuffer( + const gfx::Size& size, + SkColor4f color) { + const auto it = + base::ranges::find_if(solid_color_buffers_, [&](const auto& buffer) { + return buffer.size == size && + buffer.color.toSkColor() == color.toSkColor(); + }); + + if (it != solid_color_buffers_.end()) { + // This is a prefect match so use this directly. + return it->handle.Clone(); + } + + // Worst case allocate a new solid color buffer. This definitely will occur on + // startup. 
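+ // The new buffer is CPU-mapped once, filled with the premultiplied color
+ // below, unmapped, and then cached in |solid_color_buffers_| (bounded by
+ // kMaxSolidColorBuffers) so later requests for the same size and color can
+ // reuse the handle.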
+ OHNativeBuffer_Config config; + config.width = size.width(); + config.height = size.height(); + config.format = NATIVEBUFFER_PIXEL_FMT_RGBA_8888; + config.usage = NATIVEBUFFER_USAGE_HW_TEXTURE | NATIVEBUFFER_USAGE_MEM_DMA | + NATIVEBUFFER_USAGE_CPU_READ | NATIVEBUFFER_USAGE_CPU_WRITE; + + OHNativeBuffer* native_buffer = OHNativeBufferCompat::Get().Allocate(&config); + if (!native_buffer) { + LOG(ERROR) << "Failed to allocate native buffer"; + return ScopedNativeBufferHandle(); + } + + ScopedNativeBufferHandle::Buffer buffer; + buffer.native_buffer = native_buffer; + + auto handle = ScopedNativeBufferHandle::Adopt(std::move(buffer)); + + // Get configuration about buffer to obtain stride. + OHNativeBufferCompat::Get().GetConfig(handle.native_buffer(), &config); + + void* address = nullptr; + if (int32_t error = + OHNativeBufferCompat::Get().Map(handle.native_buffer(), &address)) { + LOG(ERROR) << "Failed to map buffer: " << error; + return ScopedNativeBufferHandle(); + } + + // Premultiply the SkColor4f to support transparent quads. + SkColor4f premul{color[0] * color[3], color[1] * color[3], + color[2] * color[3], color[3]}; + + const uint32_t premul_rgba_bytes = premul.toBytes_RGBA(); + + auto pixel_span = base::make_span( + reinterpret_cast(&premul_rgba_bytes), sizeof(uint32_t)); + + for (int y = 0; y < size.height(); y++) { + // NOTE: config.stride is in bytes. + uint8_t* dst = reinterpret_cast(address) + config.stride * y; + + for (int x = 0; x < size.width(); x++) { + memcpy(dst + x * pixel_span.size(), pixel_span.data(), pixel_span.size()); + } + } + + if (int32_t error = + OHNativeBufferCompat::Get().Unmap(handle.native_buffer())) { + LOG(ERROR) << "Failed to unmap buffer: " << error; + return ScopedNativeBufferHandle(); + } + + while (solid_color_buffers_.size() > kMaxSolidColorBuffers) { + solid_color_buffers_.pop_front(); + } + + solid_color_buffers_.emplace_back(size, color, handle.Clone()); + + return handle; +} + +NativeBufferManager::SolidColorBuffer::SolidColorBuffer( + const gfx::Size size, + SkColor4f color, + ScopedNativeBufferHandle handle) + : size(size), color(color), handle(std::move(handle)) {} + +NativeBufferManager::SolidColorBuffer::~SolidColorBuffer() = default; + +} // namespace gfx diff --git a/ui/gfx/ohos/native_buffer_manager.h b/ui/gfx/ohos/native_buffer_manager.h new file mode 100644 index 0000000000..0bdd9d6d6a --- /dev/null +++ b/ui/gfx/ohos/native_buffer_manager.h @@ -0,0 +1,69 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_NATIVE_BUFFER_MANAGER_H_ +#define UI_GFX_OHOS_NATIVE_BUFFER_MANAGER_H_ + +#include + +#include "base/memory/scoped_refptr.h" +#include "third_party/skia/include/core/SkColor.h" +#include "ui/gfx/buffer_types.h" +#include "ui/gfx/geometry/size.h" +#include "ui/gfx/gfx_export.h" +#include "ui/gfx/ohos/scoped_native_buffer_handle.h" + +namespace gfx { + +class NativeBuffer; +class NativeBufferQueueCache; + +class GFX_EXPORT NativeBufferManager { + public: + static bool SupportsNativeBuffers(); + + NativeBufferManager(); + ~NativeBufferManager(); + + NativeBufferManager(const NativeBufferManager&) = delete; + NativeBufferManager& operator=(const NativeBufferManager&) = delete; + + bool CanCreateNativeBufferForFormat(gfx::BufferFormat format); + + // Creates a new native buffer. 
+ scoped_refptr CreateNativeBuffer(const gfx::Size& size, + gfx::BufferFormat format, + gfx::BufferUsage usage); + + ScopedNativeBufferHandle CreateNativeBuffer(const gfx::Size& size, + int32_t format, + uint64_t usage); + + ScopedNativeBufferHandle GetOrCreateSolidColorBuffer(const gfx::Size& size, + SkColor4f color); + + NativeBufferQueueCache* background_native_buffer_queue_cache() { + return background_native_buffer_queue_cache_.get(); + } + + private: + std::unique_ptr background_native_buffer_queue_cache_; + + struct SolidColorBuffer { + SolidColorBuffer(const gfx::Size size, + SkColor4f color, + ScopedNativeBufferHandle handle); + ~SolidColorBuffer(); + + gfx::Size size; + SkColor4f color; + ScopedNativeBufferHandle handle; + }; + + std::deque solid_color_buffers_; +}; + +} // namespace gfx + +#endif // UI_GFX_OHOS_NATIVE_BUFFER_MANAGER_H_ diff --git a/ui/gfx/ohos/native_buffer_queue.cc b/ui/gfx/ohos/native_buffer_queue.cc new file mode 100644 index 0000000000..8c4425c424 --- /dev/null +++ b/ui/gfx/ohos/native_buffer_queue.cc @@ -0,0 +1,595 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "ui/gfx/ohos/native_buffer_queue.h" + +#include "base/containers/contains.h" +#include "base/containers/span.h" +#include "base/feature_list.h" +#include "base/logging.h" +#include "third_party/ohos_ndk/includes/ohos_adapter/ohos_adapter_helper.h" +#include "third_party/skia/include/core/SkColor.h" +#include "ui/gfx/buffer_format_util.h" +#include "ui/gfx/ohos/native_buffer_manager.h" +#include "ui/gfx/ohos/native_buffer_utils.h" +#include "ui/gfx/ohos/oh_native_buffer_compat.h" +#include "ui/gfx/ohos/oh_native_window_compat.h" + +namespace gfx { + +namespace { + +// See SkiaOutputDeviceBufferQueue capabilities (number_of_buffers). +static constexpr int kMinBufferQueueSize = 3; +static constexpr int kMaxBufferQueueSize = 5; + +static constexpr int kInvalidFenceFD = -1; + +// The background solid color buffer for output presentation. +static constexpr gfx::Size kSolidColorBufferSize(1, 1); +static constexpr SkColor4f kSolidColorBufferColor = SkColors::kWhite; + +// TODO(v00863305): Remove this temporary workaround when RS will be able to +// release surface node's prebuffer with empty buffer queue cache. +static constexpr gfx::Size kSolidColorBufferSize2(2, 2); +static constexpr SkColor4f kSolidColorBufferColor2 = SkColors::kWhite; + +// If enabled, this feature don't allow to present local frames to display. 
+BASE_FEATURE(kDontPresentLocalFrames, + "DontPresentLocalFrames", + base::FEATURE_ENABLED_BY_DEFAULT); + +size_t GetEstimatedSizeInBytes(const gfx::Size& size) { + return gfx::BufferSizeForBufferFormat(size, gfx::BufferFormat::RGBA_8888); +} + +ScopedNativeBufferHandle CreateNativeWindowBuffer( + ScopedNativeBufferHandle handle) { + auto* ohnwbuffer = OHNativeWindowCompat::Get().CreateNativeWindowBuffer( + handle.native_buffer()); + if (!ohnwbuffer) { + LOG(ERROR) << "Failed to create native window buffer"; + return ScopedNativeBufferHandle(); + } + + ScopedNativeBufferHandle::Buffer nwbhbuffer; + nwbhbuffer.native_window_buffer = ohnwbuffer; + + return ScopedNativeBufferHandle::Adopt(std::move(nwbhbuffer)); +} + +} // namespace + +NativeBufferQueue::NativeBufferQueue(NativeBufferManager* buffer_manager, + gfx::ScopedOHNativeWindow window) + : buffer_manager_(buffer_manager), + window_(std::move(window)), + background_native_buffer_queue_cache_( + buffer_manager->background_native_buffer_queue_cache()), + supports_background_native_buffer_queue_cache_( + !!background_native_buffer_queue_cache_), + dont_present_local_frames_( + base::FeatureList::IsEnabled(kDontPresentLocalFrames)) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); +} + +NativeBufferQueue::~NativeBufferQueue() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + Destroy(); +} + +bool NativeBufferQueue::Initialize() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowHandleOptions( + window_.oh_native_window(), + NativeWindowOperation::GET_BUFFERQUEUE_SIZE, &queue_size_)) { + LOG(ERROR) << "Failed to query buffer queue size: " << error; + return false; + } + + if (queue_size_ < kMinBufferQueueSize || queue_size_ > kMaxBufferQueueSize) { + LOG(WARNING) << "Failed to fulfill the buffer queue size requirements: " + "unsupported buffer queue size= " + << queue_size_; + return false; + } + + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowHandleOptions( + window_.oh_native_window(), NativeWindowOperation::GET_FORMAT, + &buffer_format_)) { + LOG(ERROR) << "Failed to query buffer format: " << error; + return false; + } + + if (buffer_format_ != NATIVEBUFFER_PIXEL_FMT_RGBA_8888) { + LOG(WARNING) << "Failed to fulfill the buffer format requirements: " + "unsupported buffer format= " + << buffer_format_; + return false; + } + + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowHandleOptions( + window_.oh_native_window(), NativeWindowOperation::GET_USAGE, + &buffer_usage_)) { + LOG(ERROR) << "Failed to query buffer usage: " << error; + return false; + } + + return true; +} + +void NativeBufferQueue::Destroy() { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + if (supports_background_native_buffer_queue_cache_) { + if (cached_background_buffer_sequence_number_) { + background_native_buffer_queue_cache_->DeleteBuffer( + this, std::move(cached_background_buffer_sequence_number_).value()); + } + + background_buffer_handle_.Reset(); + background_buffer_sequence_number_.reset(); + background_solid_color_buffer_handle_.Reset(); + background_solid_color_buffer_handle2_.Reset(); + } + + buffers_.clear(); + + if (has_established_connection_to_buffer_queue_) { + OHOS::NWeb::OhosAdapterHelper::GetInstance() + .GetWindowAdapterInstance() + .NativeWindowSurfaceCleanCache( + reinterpret_cast(window_.oh_native_window())); + } + + weak_factory_.InvalidateWeakPtrs(); +} + +bool NativeBufferQueue::Resize(const gfx::Size& size) { + 
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + + if (buffer_size_ == size) { + return true; + } + + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowHandleOptions( + window_.oh_native_window(), + NativeWindowOperation::SET_BUFFER_GEOMETRY, size.width(), + size.height())) { + LOG(ERROR) << "Failed to set buffer geometry: " << error; + return false; + } + + buffer_size_ = size; + + if (supports_background_native_buffer_queue_cache_) { + if (cached_background_buffer_sequence_number_) { + background_native_buffer_queue_cache_->DeleteBuffer( + this, std::move(cached_background_buffer_sequence_number_).value()); + } + + background_buffer_handle_.Reset(); + background_buffer_sequence_number_.reset(); + } + + has_pending_buffer_queue_cache_clean_ = true; + return true; +} + +void NativeBufferQueue::SetVisibility(bool visibility) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + if (visibility_ == visibility) { + return; + } + + visibility_ = visibility; + + if (visibility) { + if (supports_background_native_buffer_queue_cache_) { + OHOS::NWeb::OhosAdapterHelper::GetInstance() + .GetWindowAdapterInstance() + .NativeWindowSurfaceCleanCache( + reinterpret_cast(window_.oh_native_window())); + + if (cached_background_buffer_sequence_number_) { + background_buffer_handle_ = + background_native_buffer_queue_cache_->TakeBuffer( + this, cached_background_buffer_sequence_number_.value()); + background_buffer_sequence_number_ = + std::move(cached_background_buffer_sequence_number_); + } + } + } else { + has_pending_buffer_queue_cache_clean_ = true; + } +} + +scoped_refptr NativeBufferQueue::AllocateBuffer( + const gfx::Size& size) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + if (buffer_size_ != size) { + LOG(ERROR) << "Failed to allocate buffer - buffer geometry" + "size does not match requested size: " + << buffer_size_.ToString() << " vs " << size.ToString(); + return nullptr; + } + + ScopedNativeBufferHandle handle; + + if (supports_background_native_buffer_queue_cache_) { + // Try to reuse background buffer for the last allocation request. + if (background_buffer_handle_.is_valid() && + (buffers_.size() == static_cast((queue_size_ - 1)))) { + handle = background_buffer_handle_.Clone(); + DCHECK(handle.is_valid()); + } + } + + // Fallback to new native buffer allocation. 
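+ // CreateBuffer() goes through NativeBufferManager with the queue's current
+ // format and usage and wraps the result into a native window buffer handle.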
+ if (!handle.is_valid()) { + handle = CreateBuffer(); + if (!handle.is_valid()) { + return nullptr; + } + } + + uint32_t sequence_number = GetNativeBufferSequenceNumber(handle); + + DCHECK(!base::Contains(buffers_, sequence_number)); + buffers_.emplace(sequence_number, + BufferEntry{BufferEntry::Status::ALLOCATED}); + + return base::MakeRefCounted(std::move(handle), size, + true /*is_primary_plane*/, + weak_factory_.GetWeakPtr()); +} + +bool NativeBufferQueue::Present(scoped_refptr buffer, + const gfx::Rect& damage_rect, + std::unique_ptr release_fence, + bool local_frame) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(base::Contains(buffers_, buffer->sequence_number())); + DCHECK(base::Contains(accessing_buffers_, buffer->sequence_number())); + + auto it = buffers_.find(buffer->sequence_number()); + + if (it->second.status != BufferEntry::Status::DEQUEUED) { + LOG(ERROR) << "Present: invalid buffer status= " + << static_cast(it->second.status); + return false; + } + + if (dont_present_local_frames_ && local_frame) { + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowAbortBuffer( + window_.oh_native_window(), + buffer->handle().native_window_buffer())) { + DLOG(ERROR) << "Failed to abort buffer: " << error; + return false; + } + } else { + int fence = release_fence + ? release_fence->GetGpuFenceHandle().owned_fd.get() + : kInvalidFenceFD; + + struct Region region; + struct Region::Rect rect; + + // BufferQueueSurfacePresenter's surface origin is top-left, but RS + // expects bottom-left and do Y coordinate flip, so we must compensate it. + // See RSBaseRenderUtil::ConsumeAndUpdateBuffer for more information. + rect.x = damage_rect.x(); + rect.y = buffer->size().height() - damage_rect.y() - damage_rect.height(); + rect.w = damage_rect.width(); + rect.h = damage_rect.height(); + region.rects = ▭ + region.rectNumber = 1; + + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowFlushBuffer( + window_.oh_native_window(), buffer->handle().native_window_buffer(), + fence, region)) { + DLOG(ERROR) << "Failed to flush buffer: " << error; + return false; + } + + if (supports_background_native_buffer_queue_cache_) { + if (background_buffer_handle_.is_valid()) { + background_buffer_handle_.Reset(); + } + background_buffer_sequence_number_ = buffer->sequence_number(); + } + } + + it->second.status = BufferEntry::Status::QUEUED; + + buffer->EndAccess(); + return true; +} + +void NativeBufferQueue::OnBufferEvicted(uint32_t sequence_number, + ScopedNativeBufferHandle handle) { + DCHECK(supports_background_native_buffer_queue_cache_); + DCHECK_EQ(sequence_number, cached_background_buffer_sequence_number_.value()); + DCHECK(handle.is_valid()); + cached_background_buffer_sequence_number_.reset(); + handle.Reset(); + + OHOS::NWeb::OhosAdapterHelper::GetInstance() + .GetWindowAdapterInstance() + .NativeWindowSurfaceCleanCache( + reinterpret_cast(window_.oh_native_window())); + + // Allocate and present the solid color buffer on display in background. + if (!background_solid_color_buffer_handle_.is_valid()) { + background_solid_color_buffer_handle_ = CreateSolidColorBuffer(); + } + DCHECK(background_solid_color_buffer_handle_.is_valid()); + + PresentLocally(background_solid_color_buffer_handle_, kSolidColorBufferSize, + gfx::Rect(kSolidColorBufferSize)); + + // Allocate and present the second solid color buffer on display in background + // to release surface node's prebuffer. 
+ if (!background_solid_color_buffer_handle2_.is_valid()) { + background_solid_color_buffer_handle2_ = CreateSolidColorBuffer2(); + } + DCHECK(background_solid_color_buffer_handle2_.is_valid()); + + PresentLocally(background_solid_color_buffer_handle2_, kSolidColorBufferSize2, + gfx::Rect(kSolidColorBufferSize2)); +} + +bool NativeBufferQueue::OnBeginAccess( + gfx::NativeBuffer* buffer, + std::unique_ptr& acquire_fence) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(base::Contains(buffers_, buffer->sequence_number())); + + if (base::Contains(accessing_buffers_, buffer->sequence_number())) { + return false; + } + + auto it = buffers_.find(buffer->sequence_number()); + DCHECK(it != buffers_.end()); + + if (it->second.status == BufferEntry::Status::ALLOCATED) { + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowAttachBuffer( + window_.oh_native_window(), + buffer->handle().native_window_buffer())) { + LOG(ERROR) << "Failed to attach buffer: " << error; + return false; + } + + it->second.status = BufferEntry::Status::DEQUEUED; + + accessing_buffers_.emplace(buffer->sequence_number()); + return true; + } else if (it->second.status == BufferEntry::Status::DEQUEUED) { + // Buffer was dequeued early due to frame dropping by RS. + accessing_buffers_.emplace(buffer->sequence_number()); + return true; + } else if (it->second.status == BufferEntry::Status::QUEUED) { + OHNativeWindowBuffer* native_window_buffer = nullptr; + base::ScopedFD sync_fd; + + bool dequeue_until_done = false; + do { + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowRequestBuffer( + window_.oh_native_window(), &native_window_buffer, + base::ScopedFD::Receiver(sync_fd).get())) { + LOG(ERROR) << "Failed to request buffer: " << error; + return false; + } + + has_established_connection_to_buffer_queue_ = true; + + ScopedNativeBufferHandle::Buffer nwbhbuffer; + nwbhbuffer.native_window_buffer = native_window_buffer; + + auto nwbhandle = ScopedNativeBufferHandle::Create(std::move(nwbhbuffer)); + + auto sequence_number = GetNativeBufferSequenceNumber(nwbhandle); + + if (buffer->sequence_number() != sequence_number) { + // Native buffer queue should return buffers in presenting + // order, but due to existed frame dropping mechanism in RS + // sometimes buffers could be dequeued in not continuous + // sequence order. See RSBaseRenderUtil::DropFrameProcess for + // more information. 
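+ // In that case the unexpectedly returned buffer is marked as DEQUEUED and
+ // the request is retried until the buffer being accessed here is handed
+ // back by the queue.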
+ auto nwb_it = buffers_.find(sequence_number); + if (nwb_it != buffers_.end()) { + nwb_it->second.status = BufferEntry::Status::DEQUEUED; + } else { + LOG(ERROR) << "Unexpected buffer was returned on request"; + return false; + } + } else { + dequeue_until_done = true; + } + } while (!dequeue_until_done); + + it->second.status = BufferEntry::Status::DEQUEUED; + + accessing_buffers_.emplace(buffer->sequence_number()); + + if (sync_fd.is_valid()) { + gfx::GpuFenceHandle fence_handle; + fence_handle.owned_fd = std::move(sync_fd); + + acquire_fence = std::make_unique(std::move(fence_handle)); + } + + return true; + } + + LOG(ERROR) << "OnBeginAccess: invalid buffer status= " + << static_cast(it->second.status); + return false; +} + +bool NativeBufferQueue::OnEndAccess(gfx::NativeBuffer* buffer) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(base::Contains(buffers_, buffer->sequence_number())); + + if (!base::Contains(accessing_buffers_, buffer->sequence_number())) { + return false; + } + + accessing_buffers_.erase(buffer->sequence_number()); + + auto it = buffers_.find(buffer->sequence_number()); + DCHECK(it != buffers_.end()); + + if (it->second.status == BufferEntry::Status::ALLOCATED) { + // Do nothing because the buffer was allocated locally and was + // not be attached to or dequeued from buffer queue yet. + return true; + } else if (it->second.status == BufferEntry::Status::DEQUEUED) { + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowAbortBuffer( + window_.oh_native_window(), + buffer->handle().native_window_buffer())) { + LOG(ERROR) << "Failed to abort buffer: " << error; + return false; + } + + it->second.status = BufferEntry::Status::QUEUED; + return true; + } else if (it->second.status == BufferEntry::Status::QUEUED) { + // Do nothing because the buffer was already queued on Present() + // call. + return true; + } + + LOG(ERROR) << "OnEndAccess: invalid buffer status= " + << static_cast(it->second.status); + return false; +} + +void NativeBufferQueue::OnDestroy(gfx::NativeBuffer* buffer) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK(base::Contains(buffers_, buffer->sequence_number())); + + if (supports_background_native_buffer_queue_cache_ && !visibility_) { + if (!background_buffer_handle_.is_valid() && + background_buffer_sequence_number_ && + background_buffer_sequence_number_.value() == + buffer->sequence_number()) { + background_buffer_handle_ = buffer->handle().Clone(); + } + } + + buffers_.erase(buffer->sequence_number()); + + if (buffers_.empty()) { + if (has_pending_buffer_queue_cache_clean_) { + has_pending_buffer_queue_cache_clean_ = false; + // TODO(v00863305): Remove after required change on buffer queue API. + // To be able to clean cache need to have established + // connection from client producer otherwise error reply will be + // return and buffer cache will be not cleaned on RS side. To + // satisfy required criteria let's request temporary buffer from + // buffer queue which will be released later on the clean cache + // operation. See BufferQueueProducer::CheckConnectLocked for + // more information. 
+ if (!has_established_connection_to_buffer_queue_) { + OHNativeWindowBuffer* native_window_buffer = nullptr; + base::ScopedFD sync_fd; + + if (int32_t error = + OHNativeWindowCompat::Get().NativeWindowRequestBuffer( + window_.oh_native_window(), &native_window_buffer, + base::ScopedFD::Receiver(sync_fd).get())) { + LOG(ERROR) << "Failed to request buffer: " << error; + } + + has_established_connection_to_buffer_queue_ = true; + } + + OHOS::NWeb::OhosAdapterHelper::GetInstance() + .GetWindowAdapterInstance() + .NativeWindowSurfaceCleanCache( + reinterpret_cast(window_.oh_native_window())); + } + + if (supports_background_native_buffer_queue_cache_ && !visibility_) { + if (background_buffer_handle_.is_valid()) { + PresentLocally(background_buffer_handle_, buffer_size_, + gfx::Rect(buffer_size_)); + + cached_background_buffer_sequence_number_ = + std::move(background_buffer_sequence_number_); + background_native_buffer_queue_cache_->PutBuffer( + this, cached_background_buffer_sequence_number_.value(), + std::move(background_buffer_handle_), + GetEstimatedSizeInBytes(buffer_size_)); + } + } + } +} + +ScopedNativeBufferHandle NativeBufferQueue::CreateBuffer() { + auto handle = buffer_manager_->CreateNativeBuffer( + buffer_size_, buffer_format_, buffer_usage_); + if (!handle.is_valid()) { + return ScopedNativeBufferHandle(); + } + + return CreateNativeWindowBuffer(std::move(handle)); +} + +ScopedNativeBufferHandle NativeBufferQueue::CreateSolidColorBuffer() { + auto handle = buffer_manager_->GetOrCreateSolidColorBuffer( + kSolidColorBufferSize, kSolidColorBufferColor); + if (!handle.is_valid()) { + return ScopedNativeBufferHandle(); + } + + return CreateNativeWindowBuffer(std::move(handle)); +} + +ScopedNativeBufferHandle NativeBufferQueue::CreateSolidColorBuffer2() { + auto handle = buffer_manager_->GetOrCreateSolidColorBuffer( + kSolidColorBufferSize2, kSolidColorBufferColor2); + if (!handle.is_valid()) { + return ScopedNativeBufferHandle(); + } + + return CreateNativeWindowBuffer(std::move(handle)); +} + +bool NativeBufferQueue::PresentLocally(const ScopedNativeBufferHandle& handle, + const gfx::Size& size, + const gfx::Rect& damage_rect) { + DCHECK(handle.is_valid()); + + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowAttachBuffer( + window_.oh_native_window(), handle.native_window_buffer())) { + LOG(ERROR) << "Failed to attach buffer: " << error; + return false; + } + + struct Region region; + struct Region::Rect rect; + // BufferQueueSurfacePresenter's surface origin is top-left, but RS expects + // bottom-left and do Y coordinate flip, so we must compensate it. + // See RSBaseRenderUtil::ConsumeAndUpdateBuffer for more information. + rect.x = damage_rect.x(); + rect.y = size.height() - damage_rect.y() - damage_rect.height(); + rect.w = damage_rect.width(); + rect.h = damage_rect.height(); + region.rects = ▭ + region.rectNumber = 1; + + if (int32_t error = OHNativeWindowCompat::Get().NativeWindowFlushBuffer( + window_.oh_native_window(), handle.native_window_buffer(), + kInvalidFenceFD, region)) { + LOG(ERROR) << "Failed to flush buffer: " << error; + return false; + } + + return true; +} + +} // namespace gfx diff --git a/ui/gfx/ohos/native_buffer_queue.h b/ui/gfx/ohos/native_buffer_queue.h new file mode 100644 index 0000000000..7700def44c --- /dev/null +++ b/ui/gfx/ohos/native_buffer_queue.h @@ -0,0 +1,113 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
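Aside for reviewers: a minimal sketch of the damage-rect flip performed in PresentLocally() above, assuming the same convention as the code (top-left origin on the browser side, bottom-left origin plus a Y flip on the RS side). The helper name FlipDamageRectForRS is hypothetical. For a 1280x720 buffer, a top-left damage rect at (0, 0) with size 1280x100 maps to y = 720 - 0 - 100 = 620 before it is handed to NativeWindowFlushBuffer().

#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"

// Mirrors the coordinate compensation done in NativeBufferQueue::PresentLocally().
gfx::Rect FlipDamageRectForRS(const gfx::Size& buffer_size,
                              const gfx::Rect& damage_rect) {
  return gfx::Rect(
      damage_rect.x(),
      buffer_size.height() - damage_rect.y() - damage_rect.height(),
      damage_rect.width(), damage_rect.height());
}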
+
+#ifndef UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_H_
+#define UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_H_
+
+#include "base/containers/flat_map.h"
+#include "base/containers/flat_set.h"
+#include "base/memory/raw_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/gfx_export.h"
+#include "ui/gfx/gpu_fence.h"
+#include "ui/gfx/ohos/native_buffer.h"
+#include "ui/gfx/ohos/native_buffer_queue_cache.h"
+#include "ui/gfx/ohos/scoped_native_buffer_handle.h"
+#include "ui/gfx/ohos/scoped_oh_native_window.h"
+
+namespace gfx {
+
+class NativeBufferManager;
+
+// A thin wrapper around the platform buffer queue.
+class GFX_EXPORT NativeBufferQueue : public NativeBufferQueueCache::Client,
+                                     public NativeBuffer::Delegate {
+ public:
+  NativeBufferQueue(NativeBufferManager* buffer_manager,
+                    ScopedOHNativeWindow window);
+  ~NativeBufferQueue();
+
+  NativeBufferQueue(const NativeBufferQueue&) = delete;
+  NativeBufferQueue& operator=(const NativeBufferQueue&) = delete;
+
+  bool Initialize();
+  void Destroy();
+
+  bool Resize(const gfx::Size& size);
+  void SetVisibility(bool visibility);
+
+  scoped_refptr<NativeBuffer> AllocateBuffer(const gfx::Size& size);
+
+  bool Present(scoped_refptr<NativeBuffer> buffer,
+               const gfx::Rect& damage_rect,
+               std::unique_ptr<gfx::GpuFence> release_fence,
+               bool local_frame = false);
+
+  // NativeBufferQueueCache::Client implementation.
+  void OnBufferEvicted(uint32_t sequence_number,
+                       ScopedNativeBufferHandle handle) override;
+
+  // NativeBuffer::Delegate implementation.
+  bool OnBeginAccess(gfx::NativeBuffer* buffer,
+                     std::unique_ptr<gfx::GpuFence>& acquire_fence) override;
+  bool OnEndAccess(gfx::NativeBuffer* buffer) override;
+  void OnDestroy(gfx::NativeBuffer* buffer) override;
+
+  int GetQueueSize() const { return queue_size_; }
+
+ private:
+  // Creates a native buffer.
+  ScopedNativeBufferHandle CreateBuffer();
+  // Creates a native buffer of the specified color.
+  ScopedNativeBufferHandle CreateSolidColorBuffer();
+  ScopedNativeBufferHandle CreateSolidColorBuffer2();
+
+  bool PresentLocally(const ScopedNativeBufferHandle& handle,
+                      const gfx::Size& size,
+                      const gfx::Rect& damage_rect);
+
+  const raw_ptr<NativeBufferManager> buffer_manager_;
+  ScopedOHNativeWindow window_;
+
+  const raw_ptr<NativeBufferQueueCache> background_native_buffer_queue_cache_;
+  const bool supports_background_native_buffer_queue_cache_;
+
+  ScopedNativeBufferHandle background_buffer_handle_;
+  absl::optional<uint32_t> background_buffer_sequence_number_;
+  absl::optional<uint32_t> cached_background_buffer_sequence_number_;
+
+  ScopedNativeBufferHandle background_solid_color_buffer_handle_;
+  ScopedNativeBufferHandle background_solid_color_buffer_handle2_;
+
+  const bool dont_present_local_frames_;
+
+  int32_t queue_size_;
+  int32_t buffer_format_;
+  uint64_t buffer_usage_;
+  gfx::Size buffer_size_;
+
+  bool visibility_ = true;
+
+  struct BufferEntry {
+    enum class Status { ALLOCATED, DEQUEUED, QUEUED };
+
+    Status status;
+  };
+
+  base::flat_map<uint32_t, BufferEntry> buffers_;
+  base::flat_set<uint32_t> accessing_buffers_;
+
+  bool has_established_connection_to_buffer_queue_ = false;
+  bool has_pending_buffer_queue_cache_clean_ = false;
+
+  THREAD_CHECKER(thread_checker_);
+
+  base::WeakPtrFactory<NativeBufferQueue> weak_factory_{this};
+};
+
+}  // namespace gfx
+
+#endif  // UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_H_
diff --git a/ui/gfx/ohos/native_buffer_queue_cache.cc b/ui/gfx/ohos/native_buffer_queue_cache.cc
new file mode 100644
index 0000000000..0d73f0e320
--- /dev/null
+++ b/ui/gfx/ohos/native_buffer_queue_cache.cc
@@ -0,0 +1,274 @@
+// Copyright 2024 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/gfx/ohos/native_buffer_queue_cache.h"
+
+#include "base/check.h"
+#include "base/feature_list.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "third_party/ohos_ndk/includes/ohos_adapter/ohos_adapter_helper.h"
+
+namespace gfx {
+
+namespace {
+
+// If enabled, allows using a background native buffer queue cache of
+// contentful primary plane buffers to present them temporarily until
+// the new UI compositor frame is ready.
+BASE_FEATURE(kBackgroundNativeBufferQueueCache,
+             "BackgroundNativeBufferQueueCache",
+             base::FEATURE_ENABLED_BY_DEFAULT);
+
+// The max number of background primary plane buffers to keep in the cache.
+constexpr base::FeatureParam<int> kBackgroundNativeBufferQueueCacheMaxCount{
+    &kBackgroundNativeBufferQueueCache, "max-count", 20};
+
+// The max size in MB allowed for the internal LRU cache of buffers.
+constexpr base::FeatureParam<int> kBackgroundNativeBufferQueueCacheMemoryLimit{
+    &kBackgroundNativeBufferQueueCache, "memory-limit", 100};
+
+// The max amount of time since a buffer was last used before we
+// will try to evict it from the cache.
+const base::FeatureParam<base::TimeDelta>
+    kBackgroundNativeBufferQueueCacheExpirationDelay{
+        &kBackgroundNativeBufferQueueCache, "expiration-delay",
+        base::Minutes(15)};
+
+}  // namespace
+
+// static
+std::unique_ptr<NativeBufferQueueCache>
+NativeBufferQueueCache::CreateBackgroundCache() {
+  static bool persist_web_background_native_buffer_queue_cache_enabled =
+      OHOS::NWeb::OhosAdapterHelper::GetInstance()
+          .GetSystemPropertiesInstance()
+          .GetBoolParameter(
+              "persist.web.background_native_buffer_queue_cache.enable", true);
+  if (!persist_web_background_native_buffer_queue_cache_enabled) {
+    return nullptr;
+  }
+
+  if (!base::FeatureList::IsEnabled(kBackgroundNativeBufferQueueCache)) {
+    return nullptr;
+  }
+
+  return base::WrapUnique(new NativeBufferQueueCache(
+      kBackgroundNativeBufferQueueCacheMemoryLimit.Get() * 1024 * 1024,
+      kBackgroundNativeBufferQueueCacheMaxCount.Get(),
+      kBackgroundNativeBufferQueueCacheExpirationDelay.Get()));
+}
+
+NativeBufferQueueCache::NativeBufferQueueCache(size_t max_memory_usage_bytes,
+                                               size_t max_buffer_count,
+                                               base::TimeDelta expiration_delay)
+    : max_memory_usage_bytes_(max_memory_usage_bytes),
+      max_buffer_count_(max_buffer_count),
+      expiration_delay_(expiration_delay),
+      task_runner_(base::SingleThreadTaskRunner::GetCurrentDefault()) {
+  DCHECK(task_runner_);
+
+  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+      this, "gfx::NativeBufferQueueCache", task_runner_);
+
+  memory_pressure_listener_ = std::make_unique<base::MemoryPressureListener>(
+      FROM_HERE, base::BindRepeating(&NativeBufferQueueCache::OnMemoryPressure,
+                                     weak_ptr_factory_.GetWeakPtr()));
+}
+
+NativeBufferQueueCache::~NativeBufferQueueCache() {
+  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+      this);
+
+  base::AutoLock lock(lock_);
+
+  EvictBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
+  DCHECK_EQ(0u, buffers_.size());
+  DCHECK_EQ(0u, indexing_.size());
+  DCHECK_EQ(0u, total_memory_usage_bytes_);
+  DCHECK_EQ(0u, total_buffer_count_);
+}
+
+bool NativeBufferQueueCache::OnMemoryDump(
+    const base::trace_event::MemoryDumpArgs& args,
+    base::trace_event::ProcessMemoryDump* pmd) {
+  base::AutoLock lock(lock_);
+
+  std::string dump_name =
+      base::StringPrintf("gfx/native_buffer_queue_cache/0x%" PRIXPTR,
+                         reinterpret_cast<uintptr_t>(this));
+  base::trace_event::MemoryAllocatorDump* dump =
+      pmd->CreateAllocatorDump(dump_name);
+  dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+                  total_memory_usage_bytes_);
+  return true;
+}
+
+void NativeBufferQueueCache::OnMemoryPressure(
+    base::MemoryPressureListener::MemoryPressureLevel level) {
+  base::AutoLock lock(lock_);
+
+  switch (level) {
+    case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+    case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+      break;
+    case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+      // Evict all buffers, regardless of how recently they were used.
+ EvictBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); + break; + } +} + +void NativeBufferQueueCache::PutBuffer(Client* client, + uint32_t sequence_number, + ScopedNativeBufferHandle handle, + size_t estimated_size) { + base::AutoLock lock(lock_); + + buffers_.emplace_front(client, sequence_number, std::move(handle), + estimated_size); + auto result = indexing_.emplace(CacheEntryKey{client, sequence_number}, + buffers_.begin()); + DCHECK(result.second); + + total_memory_usage_bytes_ += estimated_size; + ++total_buffer_count_; + + // Reduce memory of unused buffers to stay within the limit. + ReduceMemoryUsage(); + // Now that we have evictable buffers, schedule an eviction call. + ScheduleEvictExpiredBuffers(); +} + +ScopedNativeBufferHandle NativeBufferQueueCache::TakeBuffer( + Client* client, + uint32_t sequence_number) { + base::AutoLock lock(lock_); + + const auto& it = indexing_.find(CacheEntryKey{client, sequence_number}); + if (it == indexing_.end()) { + return ScopedNativeBufferHandle(); + } + + DCHECK_GE(total_memory_usage_bytes_, it->second->estimated_size); + total_memory_usage_bytes_ -= it->second->estimated_size; + --total_buffer_count_; + + ScopedNativeBufferHandle handle = std::move(it->second->handle); + + buffers_.erase(it->second); + indexing_.erase(it); + return handle; +} + +void NativeBufferQueueCache::DeleteBuffer(Client* client, + uint32_t sequence_number) { + base::AutoLock lock(lock_); + + const auto& it = indexing_.find(CacheEntryKey{client, sequence_number}); + if (it == indexing_.end()) { + return; + } + + DCHECK_GE(total_memory_usage_bytes_, it->second->estimated_size); + total_memory_usage_bytes_ -= it->second->estimated_size; + --total_buffer_count_; + + buffers_.erase(it->second); + indexing_.erase(it); +} + +void NativeBufferQueueCache::ReduceMemoryUsage() { + while (!buffers_.empty()) { + if (total_buffer_count_ <= max_buffer_count_ && + total_memory_usage_bytes_ <= max_memory_usage_bytes_) { + break; + } + + auto& buffer = buffers_.back(); + + DCHECK_GE(total_memory_usage_bytes_, buffer.estimated_size); + total_memory_usage_bytes_ -= buffer.estimated_size; + --total_buffer_count_; + + buffer.client->OnBufferEvicted(buffer.sequence_number, + std::move(buffer.handle)); + + indexing_.erase(CacheEntryKey{buffer.client, buffer.sequence_number}); + buffers_.pop_back(); + } +} + +void NativeBufferQueueCache::ScheduleEvictExpiredBuffers() { + if (evict_expired_buffers_pending_ || buffers_.empty()) { + return; + } + + evict_expired_buffers_pending_ = true; + + // Schedule a call to EvictExpiredBuffers at the time when the LRU buffer + // should be released. + base::TimeTicks expiration_time = + buffers_.back().timestamp + expiration_delay_; + task_runner_->PostDelayedTask( + FROM_HERE, + base::BindOnce(&NativeBufferQueueCache::EvictExpiredBuffers, + weak_ptr_factory_.GetWeakPtr()), + expiration_time - base::TimeTicks::Now()); +} + +void NativeBufferQueueCache::EvictExpiredBuffers() { + base::AutoLock lock(lock_); + + evict_expired_buffers_pending_ = false; + + if (buffers_.empty()) { + return; + } + + base::TimeTicks current_time = base::TimeTicks::Now(); + EvictBuffersNotUsedSince(current_time - expiration_delay_); + ScheduleEvictExpiredBuffers(); +} + +void NativeBufferQueueCache::EvictBuffersNotUsedSince(base::TimeTicks time) { + while (!buffers_.empty()) { + auto& buffer = buffers_.back(); + + // Note: Back buffer is guaranteed to be LRU so we can stop releasing + // buffers as soon as we find a buffer that has been used since |time|. 
+    if (buffer.timestamp > time) {
+      return;
+    }
+
+    DCHECK_GE(total_memory_usage_bytes_, buffer.estimated_size);
+    total_memory_usage_bytes_ -= buffer.estimated_size;
+    --total_buffer_count_;
+
+    buffer.client->OnBufferEvicted(buffer.sequence_number,
+                                   std::move(buffer.handle));
+
+    indexing_.erase(CacheEntryKey{buffer.client, buffer.sequence_number});
+    buffers_.pop_back();
+  }
+}
+
+NativeBufferQueueCache::CacheEntry::CacheEntry(
+    Client* client,
+    uint32_t sequence_number,
+    ScopedNativeBufferHandle handle,
+    size_t estimated_size)
+    : client(client),
+      sequence_number(sequence_number),
+      handle(std::move(handle)),
+      estimated_size(estimated_size),
+      timestamp(base::TimeTicks::Now()) {}
+
+NativeBufferQueueCache::CacheEntry::~CacheEntry() = default;
+
+}  // namespace gfx
diff --git a/ui/gfx/ohos/native_buffer_queue_cache.h b/ui/gfx/ohos/native_buffer_queue_cache.h
new file mode 100644
index 0000000000..7f9b6681b7
--- /dev/null
+++ b/ui/gfx/ohos/native_buffer_queue_cache.h
@@ -0,0 +1,127 @@
+// Copyright 2024 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_CACHE_H_
+#define UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_CACHE_H_
+
+#include <list>
+#include <map>
+#include <memory>
+
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/task/single_thread_task_runner.h"
+#include "base/time/time.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/gfx_export.h"
+#include "ui/gfx/ohos/scoped_native_buffer_handle.h"
+
+namespace gfx {
+
+class GFX_EXPORT NativeBufferQueueCache
+    : public base::trace_event::MemoryDumpProvider {
+ public:
+  class Client {
+   public:
+    virtual ~Client() = default;
+
+    virtual void OnBufferEvicted(uint32_t sequence_number,
+                                 ScopedNativeBufferHandle handle) = 0;
+  };
+
+  static std::unique_ptr<NativeBufferQueueCache> CreateBackgroundCache();
+
+  ~NativeBufferQueueCache();
+
+  NativeBufferQueueCache(const NativeBufferQueueCache&) = delete;
+  NativeBufferQueueCache& operator=(const NativeBufferQueueCache&) = delete;
+
+  // base::trace_event::MemoryDumpProvider implementation.
+  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+                    base::trace_event::ProcessMemoryDump* pmd) override;
+
+  void OnMemoryPressure(
+      base::MemoryPressureListener::MemoryPressureLevel level);
+
+  // Puts a buffer for a |client| into the cache.
+  void PutBuffer(Client* client,
+                 uint32_t sequence_number,
+                 ScopedNativeBufferHandle handle,
+                 size_t estimated_size) LOCKS_EXCLUDED(lock_);
+
+  // Takes ownership of |client|'s buffer and removes it from the cache.
+  ScopedNativeBufferHandle TakeBuffer(Client* client, uint32_t sequence_number)
+      LOCKS_EXCLUDED(lock_);
+
+  // Deletes a buffer for a |client| from the cache.
+  void DeleteBuffer(Client* client, uint32_t sequence_number)
+      LOCKS_EXCLUDED(lock_);
+
+ private:
+  struct CacheEntry {
+   public:
+    CacheEntry(Client* client,
+               uint32_t sequence_number,
+               ScopedNativeBufferHandle handle,
+               size_t estimated_size);
+    ~CacheEntry();
+
+    raw_ptr<Client> client;
+    uint32_t sequence_number;
+    ScopedNativeBufferHandle handle;
+    size_t estimated_size;
+    base::TimeTicks timestamp;
+  };
+
+  struct CacheEntryKey {
+    raw_ptr<Client> client;
+    uint32_t sequence_number;
+  };
+
+  struct CacheEntryKeyCompare {
+    bool operator()(const CacheEntryKey& lhs, const CacheEntryKey& rhs) const {
+      return std::tie(lhs.client, lhs.sequence_number) <
+             std::tie(rhs.client, rhs.sequence_number);
+    }
+  };
+
+  NativeBufferQueueCache(size_t max_memory_usage_bytes,
+                         size_t max_buffer_count,
+                         base::TimeDelta expiration_delay);
+
+  void ReduceMemoryUsage() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void ScheduleEvictExpiredBuffers() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void EvictExpiredBuffers();
+  void EvictBuffersNotUsedSince(base::TimeTicks time)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  const size_t max_memory_usage_bytes_;
+  const size_t max_buffer_count_;
+  const base::TimeDelta expiration_delay_;
+
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+  // |lock_| must be acquired when accessing the following members.
+  base::Lock lock_;
+
+  size_t total_memory_usage_bytes_ GUARDED_BY(lock_) = 0;
+  size_t total_buffer_count_ GUARDED_BY(lock_) = 0;
+  bool evict_expired_buffers_pending_ GUARDED_BY(lock_) = false;
+
+  // Holds most recently used buffers at the front of the list.
+  std::list<CacheEntry> buffers_ GUARDED_BY(lock_);
+  std::map<CacheEntryKey,
+           std::list<CacheEntry>::iterator,
+           CacheEntryKeyCompare>
+      indexing_ GUARDED_BY(lock_);
+
+  std::unique_ptr<base::MemoryPressureListener> memory_pressure_listener_;
+
+  base::WeakPtrFactory<NativeBufferQueueCache> weak_ptr_factory_{this};
+};
+
+}  // namespace gfx
+
+#endif  // UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_CACHE_H_
diff --git a/ui/gfx/ohos/native_buffer_queue_surface.h b/ui/gfx/ohos/native_buffer_queue_surface.h
new file mode 100644
index 0000000000..c42f72f566
--- /dev/null
+++ b/ui/gfx/ohos/native_buffer_queue_surface.h
@@ -0,0 +1,26 @@
+// Copyright 2024 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_SURFACE_H_
+#define UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_SURFACE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gfx {
+
+class NativeBuffer;
+
+class NativeBufferQueueSurface {
+ public:
+  virtual ~NativeBufferQueueSurface() = default;
+
+  // Allocates a new native buffer for the primary plane.
+  virtual scoped_refptr<NativeBuffer> AllocatePrimaryPlaneBuffer(
+      const gfx::Size& size) = 0;
+};
+
+}  // namespace gfx
+
+#endif  // UI_GFX_OHOS_NATIVE_BUFFER_QUEUE_SURFACE_H_
diff --git a/ui/gfx/ohos/native_buffer_utils.cc b/ui/gfx/ohos/native_buffer_utils.cc
new file mode 100644
index 0000000000..18c7818de6
--- /dev/null
+++ b/ui/gfx/ohos/native_buffer_utils.cc
@@ -0,0 +1,32 @@
+// Copyright 2024 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
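Aside for reviewers: a minimal usage sketch of the NativeBufferQueueCache API introduced above, assuming a hypothetical DummyClient and an illustrative sequence number and size estimate; it only exercises PutBuffer(), TakeBuffer() and OnBufferEvicted() as declared in native_buffer_queue_cache.h and is not itself part of the change.

#include <utility>

#include "ui/gfx/ohos/native_buffer_queue_cache.h"
#include "ui/gfx/ohos/scoped_native_buffer_handle.h"

class DummyClient : public gfx::NativeBufferQueueCache::Client {
 public:
  // Called when a parked buffer is trimmed by the count/memory limits, the
  // expiration delay or critical memory pressure; the handle is handed back
  // so the client can drop or reuse its reference.
  void OnBufferEvicted(uint32_t sequence_number,
                       gfx::ScopedNativeBufferHandle handle) override {
    // Letting |handle| go out of scope releases the buffer reference.
  }
};

void ParkAndReclaim(gfx::NativeBufferQueueCache* cache,
                    DummyClient* client,
                    gfx::ScopedNativeBufferHandle handle) {
  constexpr uint32_t kSequenceNumber = 1;                    // Illustrative.
  constexpr size_t kEstimatedSizeInBytes = 4 * 1024 * 1024;  // Illustrative.

  // Park the buffer while the surface is backgrounded.
  cache->PutBuffer(client, kSequenceNumber, std::move(handle),
                   kEstimatedSizeInBytes);

  // Later: reclaim it; an invalid handle means the cache already evicted it.
  gfx::ScopedNativeBufferHandle reclaimed =
      cache->TakeBuffer(client, kSequenceNumber);
  if (!reclaimed.is_valid()) {
    // The buffer is gone; the caller has to allocate a fresh one.
  }
}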
+ +#include "ui/gfx/ohos/native_buffer_utils.h" + +#include "base/logging.h" +#include "ui/gfx/ohos/oh_native_buffer_compat.h" +#include "ui/gfx/ohos/scoped_native_buffer_handle.h" + +namespace gfx { + +uint32_t GetNativeBufferSequenceNumber(const ScopedNativeBufferHandle& handle) { + OHNativeBuffer* native_buffer = nullptr; + if (handle.native_buffer()) { + native_buffer = handle.native_buffer(); + } else if (handle.native_window_buffer()) { + if (int32_t error = OHNativeBufferCompat::Get().FromNativeWindowBuffer( + handle.native_window_buffer(), &native_buffer)) { + LOG(ERROR) << "Failed to create native buffer: " << error; + return kInvalidNativeBufferSequenceNumber; + } + } + + if (native_buffer) { + return OHNativeBufferCompat::Get().GetSequenceNumber(native_buffer); + } + + return kInvalidNativeBufferSequenceNumber; +} + +} // namespace gfx diff --git a/ui/gfx/ohos/native_buffer_utils.h b/ui/gfx/ohos/native_buffer_utils.h new file mode 100644 index 0000000000..eb410ff508 --- /dev/null +++ b/ui/gfx/ohos/native_buffer_utils.h @@ -0,0 +1,25 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_NATIVE_BUFFER_UTILS_H_ +#define UI_GFX_OHOS_NATIVE_BUFFER_UTILS_H_ + +#include +#include + +#include "ui/gfx/gfx_export.h" + +namespace gfx { + +class ScopedNativeBufferHandle; + +static constexpr uint32_t kInvalidNativeBufferSequenceNumber = + std::numeric_limits::max(); + +GFX_EXPORT uint32_t +GetNativeBufferSequenceNumber(const ScopedNativeBufferHandle& handle); + +} // namespace gfx + +#endif // UI_GFX_OHOS_NATIVE_BUFFER_UTILS_H_ diff --git a/ui/gfx/ohos/oh_native_buffer_abi.h b/ui/gfx/ohos/oh_native_buffer_abi.h new file mode 100755 index 0000000000..5910aa1b2a --- /dev/null +++ b/ui/gfx/ohos/oh_native_buffer_abi.h @@ -0,0 +1,150 @@ +// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_OH_NATIVE_BUFFER_ABI_H_ +#define UI_GFX_OHOS_OH_NATIVE_BUFFER_ABI_H_ + +// Minimal binary interface definitions for OHNativeBuffer based on +// include/native_buffer/native_buffer.h from the OHOS NDK for platform +// level 8+. This is only intended for use from the OHNativeBufferCompat +// wrapper for building without NDK platform level support, it is not a +// general-use header and is not complete. +// +// Delete this file when third_party/ohos_ndk/ is updated to a version +// that contains the native_buffer/native_buffer.h file. +// +// Please refer to the API documentation for details: +// https://developer.huawei.com/ndk/reference/native__buffer_8h.html + +#include + +// Use "C" linkage to match the original header file. This isn't strictly +// required since the file is not declaring global functions, but the types +// should remain in the global namespace for compatibility, and it's a reminder +// that forward declarations elsewhere should use "extern "C" to avoid +// namespace issues. 
+extern "C" { + +typedef struct OH_NativeBuffer OHNativeBuffer; + +typedef struct OHNativeWindowBuffer OHNativeWindowBuffer; + +typedef enum OH_NativeBuffer_Usage { + NATIVEBUFFER_USAGE_CPU_READ = (1ULL << 0), + NATIVEBUFFER_USAGE_CPU_WRITE = (1ULL << 1), + NATIVEBUFFER_USAGE_MEM_DMA = (1ULL << 3), + NATIVEBUFFER_USAGE_HW_RENDER = (1ULL << 8), + NATIVEBUFFER_USAGE_HW_TEXTURE = (1ULL << 9), + NATIVEBUFFER_USAGE_CPU_READ_OFTEN = (1ULL << 16), + NATIVEBUFFER_USAGE_ALIGNMENT_512 = (1ULL << 18), +} OHNativeBuffer_Usage; + +typedef enum OH_NativeBuffer_Format { + NATIVEBUFFER_PIXEL_FMT_CLUT8 = 0, + NATIVEBUFFER_PIXEL_FMT_CLUT1, + NATIVEBUFFER_PIXEL_FMT_CLUT4, + NATIVEBUFFER_PIXEL_FMT_RGB_565 = 3, + NATIVEBUFFER_PIXEL_FMT_RGBA_5658, + NATIVEBUFFER_PIXEL_FMT_RGBX_4444, + NATIVEBUFFER_PIXEL_FMT_RGBA_4444, + NATIVEBUFFER_PIXEL_FMT_RGB_444, + NATIVEBUFFER_PIXEL_FMT_RGBX_5551, + NATIVEBUFFER_PIXEL_FMT_RGBA_5551, + NATIVEBUFFER_PIXEL_FMT_RGB_555, + NATIVEBUFFER_PIXEL_FMT_RGBX_8888, + NATIVEBUFFER_PIXEL_FMT_RGBA_8888, + NATIVEBUFFER_PIXEL_FMT_RGB_888, + NATIVEBUFFER_PIXEL_FMT_BGR_565, + NATIVEBUFFER_PIXEL_FMT_BGRX_4444, + NATIVEBUFFER_PIXEL_FMT_BGRA_4444, + NATIVEBUFFER_PIXEL_FMT_BGRX_5551, + NATIVEBUFFER_PIXEL_FMT_BGRA_5551, + NATIVEBUFFER_PIXEL_FMT_BGRX_8888, + NATIVEBUFFER_PIXEL_FMT_BGRA_8888, + NATIVEBUFFER_PIXEL_FMT_YUV_422_T, + NATIVEBUFFER_PIXEL_FMT_YCBCR_422_SP, + NATIVEBUFFER_PIXEL_FMT_YCRCR_422_SP, + NATIVEBUFFER_PIXEL_FMT_YCBCR_420_SP, + NATIVEBUFFER_PIXEL_FMT_YCRCR_420_SP, + NATIVEBUFFER_PIXEL_FMT_YCBCR_422_P, + NATIVEBUFFER_PIXEL_FMT_YCRCR_422_P, + NATIVEBUFFER_PIXEL_FMT_YCBCR_420_P, + NATIVEBUFFER_PIXEL_FMT_YCRCR_420_P, + NATIVEBUFFER_PIXEL_FMT_YUYV_422_PKG, + NATIVEBUFFER_PIXEL_FMT_UYVY_422_PKG, + NATIVEBUFFER_PIXEL_FMT_YVYU_422_PKG, + NATIVEBUFFER_PIXEL_FMT_VYUY_422_PKG, + NATIVEBUFFER_PIXEL_FMT_RGBA_1010102, + NATIVEBUFFER_PIXEL_FMT_VENDER_MASK = 0X7FFF0000, + /* Invalid pixel format */ + NATIVEBUFFER_PIXEL_FMT_BUTT = 0X7FFFFFFF +} OHNativeBuffer_Format; + +typedef enum OH_NativeBuffer_ColorSpace { + OH_COLORSPACE_NONE, + OH_COLORSPACE_BT601_EBU_FULL, + OH_COLORSPACE_BT601_SMPTE_C_FULL, + OH_COLORSPACE_BT709_FULL, + OH_COLORSPACE_BT2020_HLG_FULL, + OH_COLORSPACE_BT2020_PQ_FULL, + OH_COLORSPACE_BT601_EBU_LIMIT, + OH_COLORSPACE_BT601_SMPTE_C_LIMIT, + OH_COLORSPACE_BT709_LIMIT, + OH_COLORSPACE_BT2020_HLG_LIMIT, + OH_COLORSPACE_BT2020_PQ_LIMIT, + OH_COLORSPACE_SRGB_FULL, + OH_COLORSPACE_P3_FULL, + OH_COLORSPACE_P3_HLG_FULL, + OH_COLORSPACE_P3_PQ_FULL, + OH_COLORSPACE_ADOBERGB_FULL, + OH_COLORSPACE_SRGB_LIMIT, + OH_COLORSPACE_P3_LIMIT, + OH_COLORSPACE_P3_HLG_LIMIT, + OH_COLORSPACE_P3_PQ_LIMIT, + OH_COLORSPACE_ADOBERGB_LIMIT, + OH_COLORSPACE_LINEAR_SRGB, + OH_COLORSPACE_LINEAR_BT709, + OH_COLORSPACE_LINEAR_P3, + OH_COLORSPACE_LINEAR_BT2020, + OH_COLORSPACE_DISPLAY_SRGB, + OH_COLORSPACE_DISPLAY_P3_SRGB, + OH_COLORSPACE_DISPLAY_P3_HLG, + OH_COLORSPACE_DISPLAY_P3_PQ, + OH_COLORSPACE_DISPLAY_BT2020_SRGB, + OH_COLORSPACE_DISPLAY_BT2020_HLG, + OH_COLORSPACE_DISPLAY_BT2020_PQ, +} OHNativeBuffer_ColorSpace; + +typedef struct OH_NativeBuffer_Config { + int32_t width; + int32_t height; + int32_t format; + int32_t usage; + int32_t stride; +} OHNativeBuffer_Config; + +using PFOH_NativeBuffer_Alloc = + OHNativeBuffer* (*)(const OHNativeBuffer_Config* config); +using PFOH_NativeBuffer_Reference = + int32_t (*)(OHNativeBuffer* buffer); +using PFOH_NativeBuffer_Unreference = + int32_t (*)(OHNativeBuffer* buffer); +using PFOH_NativeBuffer_GetConfig = + void (*)(OHNativeBuffer* buffer, OHNativeBuffer_Config* 
config); +using PFOH_NativeBuffer_Map = + int32_t (*)(OHNativeBuffer* buffer, void** out_virtual_address); +using PFOH_NativeBuffer_Unmap = + int32_t (*)(OHNativeBuffer* buffer); +using PFOH_NativeBuffer_GetSeqNum = + uint32_t (*)(OHNativeBuffer* buffer); +using PFOH_NativeBuffer_SetColorSpace = + int32_t (*)(OHNativeBuffer* buffer, OHNativeBuffer_ColorSpace color_space); + +using PFOH_NativeBuffer_FromNativeWindowBuffer = + int32_t (*)(OHNativeWindowBuffer* native_window_buffer, + OH_NativeBuffer** buffer); + +} // extern "C" + +#endif // UI_GFX_OHOS_OH_NATIVE_BUFFER_ABI_H_ diff --git a/ui/gfx/ohos/oh_native_buffer_compat.cc b/ui/gfx/ohos/oh_native_buffer_compat.cc new file mode 100644 index 0000000000..c7792fd9b3 --- /dev/null +++ b/ui/gfx/ohos/oh_native_buffer_compat.cc @@ -0,0 +1,100 @@ +// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "ui/gfx/ohos/oh_native_buffer_compat.h" + +#include + +#include "base/logging.h" + +#define LOAD_FUNCTION(lib, pfunc, func) \ + do { \ + pfunc = reinterpret_cast(dlsym(lib, #func)); \ + if (!pfunc) { \ + LOG(ERROR) << "Unable to load function " << #func; \ + return false; \ + } \ + } while (0) + +namespace gfx { + +// static +OHNativeBufferCompat& OHNativeBufferCompat::Get() { + static OHNativeBufferCompat instance; + return instance; +} + +bool OHNativeBufferCompat::IsSupported() { + return is_supported_; +} + +OHNativeBufferCompat::OHNativeBufferCompat() : is_supported_(LoadFunctions()) {} + +bool OHNativeBufferCompat::LoadFunctions() { + // Libraby path: /system/lib64/libnative_buffer.so -> libsurface.z.so + void* dl_handle = dlopen("libnative_buffer.so", RTLD_NOW); + if (!dl_handle) { + LOG(ERROR) << "Couldnt load libnative_buffer.so"; + return false; + } + + LOAD_FUNCTION(dl_handle, alloc_, OH_NativeBuffer_Alloc); + LOAD_FUNCTION(dl_handle, reference_, OH_NativeBuffer_Reference); + LOAD_FUNCTION(dl_handle, unreference_, OH_NativeBuffer_Unreference); + LOAD_FUNCTION(dl_handle, get_config_, OH_NativeBuffer_GetConfig); + LOAD_FUNCTION(dl_handle, map_, OH_NativeBuffer_Map); + LOAD_FUNCTION(dl_handle, unmap_, OH_NativeBuffer_Unmap); + LOAD_FUNCTION(dl_handle, get_sequence_number_, OH_NativeBuffer_GetSeqNum); + LOAD_FUNCTION(dl_handle, set_color_space_, OH_NativeBuffer_SetColorSpace); + + LOAD_FUNCTION(dl_handle, from_native_window_buffer_, + OH_NativeBuffer_FromNativeWindowBuffer); + + return true; +} + +OHNativeBuffer* OHNativeBufferCompat::Allocate( + const OHNativeBuffer_Config* config) { + return alloc_(config); +} + +int32_t OHNativeBufferCompat::Acquire(OHNativeBuffer* buffer) { + return reference_(buffer); +} + +int32_t OHNativeBufferCompat::Release(OHNativeBuffer* buffer) { + return unreference_(buffer); +} + +void OHNativeBufferCompat::GetConfig(OHNativeBuffer* buffer, + OHNativeBuffer_Config* config) { + get_config_(buffer, config); +} + +int32_t OHNativeBufferCompat::Map(OHNativeBuffer* buffer, + void** out_virtual_address) { + return map_(buffer, out_virtual_address); +} + +int32_t OHNativeBufferCompat::Unmap(OHNativeBuffer* buffer) { + return unmap_(buffer); +} + +uint32_t OHNativeBufferCompat::GetSequenceNumber(OHNativeBuffer* buffer) { + return get_sequence_number_(buffer); +} + +int32_t OHNativeBufferCompat::SetColorSpace( + OHNativeBuffer* buffer, + OHNativeBuffer_ColorSpace color_space) { + return set_color_space_(buffer, color_space); +} + +int32_t OHNativeBufferCompat::FromNativeWindowBuffer( + 
OHNativeWindowBuffer* native_window_buffer, + OHNativeBuffer** buffer) { + return from_native_window_buffer_(native_window_buffer, buffer); +} + +} // namespace gfx diff --git a/ui/gfx/ohos/oh_native_buffer_compat.h b/ui/gfx/ohos/oh_native_buffer_compat.h new file mode 100644 index 0000000000..387e010f98 --- /dev/null +++ b/ui/gfx/ohos/oh_native_buffer_compat.h @@ -0,0 +1,60 @@ +// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_OH_NATIVE_BUFFER_COMPAT_H_ +#define UI_GFX_OHOS_OH_NATIVE_BUFFER_COMPAT_H_ + +#include "ui/gfx/gfx_export.h" +#include "ui/gfx/ohos/oh_native_buffer_abi.h" + +namespace gfx { + +// This class provides runtime support for working with OHNativeBuffer objects +// on OpenHarmony systems without requiring building for the OpenHarmony NDK. +class GFX_EXPORT OHNativeBufferCompat { + public: + static OHNativeBufferCompat& Get(); + + OHNativeBufferCompat(const OHNativeBufferCompat&) = delete; + OHNativeBufferCompat& operator=(const OHNativeBufferCompat&) = delete; + + // Check if the native buffer usage is supported. + // This function returns TRUE if all the required functions are loaded. + bool IsSupported(); + + OHNativeBuffer* Allocate(const OHNativeBuffer_Config* config); + int32_t Acquire(OHNativeBuffer* buffer); + int32_t Release(OHNativeBuffer* buffer); + + void GetConfig(OHNativeBuffer* buffer, OHNativeBuffer_Config* config); + int32_t Map(OHNativeBuffer* buffer, void** out_virtual_address); + int32_t Unmap(OHNativeBuffer* buffer); + uint32_t GetSequenceNumber(OHNativeBuffer* buffer); + int32_t SetColorSpace(OHNativeBuffer* buffer, + OHNativeBuffer_ColorSpace color_space); + + int32_t FromNativeWindowBuffer(OHNativeWindowBuffer* native_window_buffer, + OHNativeBuffer** buffer); + + private: + OHNativeBufferCompat(); + bool LoadFunctions(); + + const bool is_supported_; + + PFOH_NativeBuffer_Alloc alloc_; + PFOH_NativeBuffer_Reference reference_; + PFOH_NativeBuffer_Unreference unreference_; + PFOH_NativeBuffer_GetConfig get_config_; + PFOH_NativeBuffer_Map map_; + PFOH_NativeBuffer_Unmap unmap_; + PFOH_NativeBuffer_GetSeqNum get_sequence_number_; + PFOH_NativeBuffer_SetColorSpace set_color_space_; + + PFOH_NativeBuffer_FromNativeWindowBuffer from_native_window_buffer_; +}; + +} // namespace gfx + +#endif // UI_GFX_OHOS_OH_NATIVE_BUFFER_COMPAT_H_ diff --git a/ui/gfx/ohos/oh_native_surface_factory.cc b/ui/gfx/ohos/oh_native_surface_factory.cc new file mode 100644 index 0000000000..93c2746587 --- /dev/null +++ b/ui/gfx/ohos/oh_native_surface_factory.cc @@ -0,0 +1,76 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
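Aside for reviewers: a small usage sketch of the OHNativeBufferCompat wrapper above, relying only on the entry points declared in oh_native_buffer_abi.h and oh_native_buffer_compat.h in this patch; the 1280x720 RGBA_8888 configuration and the AllocateExampleBuffer name are illustrative assumptions.

#include "ui/gfx/ohos/oh_native_buffer_compat.h"

// Allocates a buffer through the dlopen()-based wrapper and returns its
// sequence number, or 0 if the NDK symbols are unavailable.
uint32_t AllocateExampleBuffer() {
  gfx::OHNativeBufferCompat& compat = gfx::OHNativeBufferCompat::Get();
  // The wrapper resolves libnative_buffer.so symbols at runtime, so callers
  // must bail out when the library or any required symbol is missing.
  if (!compat.IsSupported()) {
    return 0;
  }

  OHNativeBuffer_Config config = {};
  config.width = 1280;  // Example dimensions only.
  config.height = 720;
  config.format = NATIVEBUFFER_PIXEL_FMT_RGBA_8888;
  config.usage = NATIVEBUFFER_USAGE_HW_TEXTURE | NATIVEBUFFER_USAGE_MEM_DMA;

  OHNativeBuffer* buffer = compat.Allocate(&config);
  if (!buffer) {
    return 0;
  }

  // Sequence numbers are what NativeBufferQueue uses to identify buffers.
  const uint32_t sequence_number = compat.GetSequenceNumber(buffer);
  compat.Release(buffer);  // Drop the allocation reference.
  return sequence_number;
}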
+ +#include "ui/gfx/ohos/oh_native_surface_factory.h" + +#include "base/check.h" +#include "base/containers/contains.h" +#include "ui/gfx/ohos/native_buffer.h" +#include "ui/gfx/ohos/native_buffer_manager.h" +#include "ui/gfx/ohos/native_buffer_queue_surface.h" + +namespace gfx { + +// static +bool OHNativeSurfaceFactory::SupportsNativeBuffers() { + return NativeBufferManager::SupportsNativeBuffers(); +} + +// static +OHNativeSurfaceFactory* OHNativeSurfaceFactory::GetInstance() { + static base::NoDestructor instance; + return instance.get(); +} + +OHNativeSurfaceFactory::OHNativeSurfaceFactory() + : buffer_manager_(std::make_unique()) {} + +OHNativeSurfaceFactory::~OHNativeSurfaceFactory() = default; + +bool OHNativeSurfaceFactory::CanCreateNativeBufferForFormat( + gfx::BufferFormat format) { + return buffer_manager_->CanCreateNativeBufferForFormat(format); +} + +scoped_refptr OHNativeSurfaceFactory::CreateNativeBuffer( + gfx::AcceleratedWidget widget, + const gfx::Size& size, + gfx::BufferFormat format, + gfx::BufferUsage usage) { + if (widget != kNullAcceleratedWidget && usage == gfx::BufferUsage::SCANOUT) { + // The usage SCANOUT is for a primary plane buffer. + auto* surface = GetSurface(widget); + CHECK(surface); + return surface->AllocatePrimaryPlaneBuffer(size); + } + return buffer_manager_->CreateNativeBuffer(size, format, usage); +} + +void OHNativeSurfaceFactory::RegisterSurface( + gfx::AcceleratedWidget widget, + NativeBufferQueueSurface* surface) { + base::AutoLock lock(surface_lock_); + DCHECK(!base::Contains(surface_map_, widget)); + surface_map_.emplace(widget, surface); +} + +void OHNativeSurfaceFactory::UnregisterSurface(gfx::AcceleratedWidget widget) { + base::AutoLock lock(surface_lock_); + auto it = surface_map_.find(widget); + DCHECK(it != surface_map_.end()); + surface_map_.erase(it); +} + +NativeBufferQueueSurface* OHNativeSurfaceFactory::GetSurface( + gfx::AcceleratedWidget widget) { + base::AutoLock lock(surface_lock_); + auto it = surface_map_.find(widget); + if (it == surface_map_.end()) { + return nullptr; + } + + return it->second; +} + +} // namespace gfx diff --git a/ui/gfx/ohos/oh_native_surface_factory.h b/ui/gfx/ohos/oh_native_surface_factory.h new file mode 100644 index 0000000000..87ad053519 --- /dev/null +++ b/ui/gfx/ohos/oh_native_surface_factory.h @@ -0,0 +1,73 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_OH_NATIVE_SURFACE_FACTORY_H_ +#define UI_GFX_OHOS_OH_NATIVE_SURFACE_FACTORY_H_ + +#include "base/containers/flat_map.h" +#include "base/memory/scoped_refptr.h" +#include "base/no_destructor.h" +#include "base/synchronization/lock.h" +#include "ui/gfx/buffer_types.h" +#include "ui/gfx/geometry/size.h" +#include "ui/gfx/gfx_export.h" +#include "ui/gfx/native_widget_types.h" + +namespace gfx { + +class NativeBufferManager; +class NativeBufferQueueSurface; +class NativeBuffer; + +class GFX_EXPORT OHNativeSurfaceFactory { + public: + static bool SupportsNativeBuffers(); + + static OHNativeSurfaceFactory* GetInstance(); + + OHNativeSurfaceFactory(const OHNativeSurfaceFactory&) = delete; + OHNativeSurfaceFactory& operator=(const OHNativeSurfaceFactory&) = delete; + + bool CanCreateNativeBufferForFormat(gfx::BufferFormat format); + + // Create a single native buffer to be used for overlay planes or zero copy + // for |widget| representing a buffer queue surface. |size| corresponds to the + // dimensions used to allocate the buffer. 
+ scoped_refptr CreateNativeBuffer( + gfx::AcceleratedWidget widget, + const gfx::Size& size, + gfx::BufferFormat format, + gfx::BufferUsage usage); + + // Registers a surface for a |widget|. + void RegisterSurface(gfx::AcceleratedWidget widget, + NativeBufferQueueSurface* surface) + LOCKS_EXCLUDED(surface_lock_); + + // Unregister a surface for a |widget|. + void UnregisterSurface(gfx::AcceleratedWidget widget) + LOCKS_EXCLUDED(surface_lock_); + + // Returns the surface for a |widget|. + NativeBufferQueueSurface* GetSurface(gfx::AcceleratedWidget widget) + LOCKS_EXCLUDED(surface_lock_); + + NativeBufferManager* buffer_manager() const { return buffer_manager_.get(); } + + private: + friend class base::NoDestructor; + + OHNativeSurfaceFactory(); + ~OHNativeSurfaceFactory(); + + std::unique_ptr buffer_manager_; + + base::flat_map surface_map_ + GUARDED_BY(surface_lock_); + base::Lock surface_lock_; +}; + +} // namespace gfx + +#endif // UI_GFX_OHOS_OH_NATIVE_SURFACE_FACTORY_H_ diff --git a/ui/gfx/ohos/oh_native_window_abi.h b/ui/gfx/ohos/oh_native_window_abi.h new file mode 100755 index 0000000000..bd802a6977 --- /dev/null +++ b/ui/gfx/ohos/oh_native_window_abi.h @@ -0,0 +1,97 @@ +// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_OH_NATIVE_WINDOW_ABI_H_ +#define UI_GFX_OHOS_OH_NATIVE_WINDOW_ABI_H_ + +// Minimal binary interface definitions for OHNativeWindow based on +// include/native_window/external_window.h from the OHOS NDK for platform +// level 8+. This is only intended for use from the OHNativeWindowCompat +// wrapper for building without NDK platform level support, it is not a +// general-use header and is not complete. +// +// Delete this file when third_party/ohos_ndk/ is updated to a version +// that contains the native_window/external_window.h file. +// +// Please refer to the API documentation for details: +// https://developer.huawei.com/ndk/reference/external__window_8h.html + +#include + +// Use "C" linkage to match the original header file. This isn't strictly +// required since the file is not declaring global functions, but the types +// should remain in the global namespace for compatibility, and it's a reminder +// that forward declarations elsewhere should use "extern "C" to avoid +// namespace issues. 
+extern "C" { + +typedef struct OHNativeWindow OHNativeWindow; +typedef struct OHNativeWindowBuffer OHNativeWindowBuffer; + +typedef struct OH_NativeBuffer OHNativeBuffer; + +typedef struct Region { + struct Rect { + int32_t x; + int32_t y; + uint32_t w; + uint32_t h; + } *rects; + int32_t rectNumber; +} Region; + +enum NativeWindowOperation { + SET_BUFFER_GEOMETRY, + GET_BUFFER_GEOMETRY, + GET_FORMAT, + SET_FORMAT, + GET_USAGE, + SET_USAGE, + SET_STRIDE, + GET_STRIDE, + SET_SWAP_INTERVAL, + GET_SWAP_INTERVAL, + SET_TIMEOUT, + GET_TIMEOUT, + SET_COLOR_GAMUT, + GET_COLOR_GAMUT, + SET_TRANSFORM, + GET_TRANSFORM, + SET_UI_TIMESTAMP, + GET_BUFFERQUEUE_SIZE, +}; + +// For OHNativeObject +using PFOH_NativeWindow_NativeObjectReference = int32_t (*)(void*); +using PFOH_NativeWindow_NativeObjectUnreference = int32_t (*)(void*); + +// For OHNativeWindow +using PFOH_NativeWindow_CreateNativeWindow = + OHNativeWindow* (*)(void* surface); +using PFOH_NativeWindow_DestroyNativeWindow = + void (*)(OHNativeWindow* window); +using PFOH_NativeWindow_NativeWindowHandleOpt = + int32_t (*)(OHNativeWindow *window, int code, ...); + +// For OHNativeWindowBuffer +using PFOH_NativeWindow_CreateNativeWindowBufferFromNativeBuffer = + OHNativeWindowBuffer* (*)(OHNativeBuffer* native_buffer); +using PFOH_NativeWindow_DestroyNativeWindowBuffer = + void (*)(OHNativeWindowBuffer* buffer); +using PFOH_NativeWindow_NativeWindowRequestBuffer = + int32_t (*)(OHNativeWindow *window, OHNativeWindowBuffer **buffer, + int *fence); +using PFOH_NativeWindow_NativeWindowFlushBuffer = + int32_t (*)(OHNativeWindow *window, OHNativeWindowBuffer *buffer, + int fence, Region region); +using PFOH_NativeWindow_NativeWindowAbortBuffer = + int32_t (*)(OHNativeWindow *window, OHNativeWindowBuffer *buffer); +using PFOH_NativeWindow_NativeWindowAttachBuffer = + int32_t (*)(OHNativeWindow *window, OHNativeWindowBuffer *buffer); +using PFOH_NativeWindow_NativeWindowDetachBuffer = + int32_t (*)(OHNativeWindow *window, OHNativeWindowBuffer *buffer); + +} // extern "C" + +#endif // UI_GFX_OHOS_OH_NATIVE_WINDOW_ABI_H_ diff --git a/ui/gfx/ohos/oh_native_window_compat.cc b/ui/gfx/ohos/oh_native_window_compat.cc new file mode 100644 index 0000000000..0363ddebfd --- /dev/null +++ b/ui/gfx/ohos/oh_native_window_compat.cc @@ -0,0 +1,189 @@ +// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "ui/gfx/ohos/oh_native_window_compat.h" + +#include +#include + +#include "base/logging.h" +#include "base/notreached.h" + +#define LOAD_FUNCTION(lib, pfunc, func) \ + do { \ + pfunc = reinterpret_cast(dlsym(lib, #func)); \ + if (!pfunc) { \ + LOG(ERROR) << "Unable to load function " << #func; \ + return false; \ + } \ + } while (0) + +namespace gfx { + +// static +OHNativeWindowCompat& OHNativeWindowCompat::Get() { + static OHNativeWindowCompat instance; + return instance; +} + +bool OHNativeWindowCompat::IsSupported() { + return is_supported_; +} + +OHNativeWindowCompat::OHNativeWindowCompat() : is_supported_(LoadFunctions()) {} + +bool OHNativeWindowCompat::LoadFunctions() { + // Libraby path: /system/lib64/libnative_window.so -> libsurface.z.so + void* dl_handle = dlopen("libnative_window.so", RTLD_NOW); + if (!dl_handle) { + LOG(ERROR) << "Couldnt load libnative_window.so"; + return false; + } + + LOAD_FUNCTION(dl_handle, reference_, OH_NativeWindow_NativeObjectReference); + LOAD_FUNCTION(dl_handle, unreference_, + OH_NativeWindow_NativeObjectUnreference); + + LOAD_FUNCTION(dl_handle, create_native_window_, + OH_NativeWindow_CreateNativeWindow); + LOAD_FUNCTION(dl_handle, destroy_native_window_, + OH_NativeWindow_DestroyNativeWindow); + + LOAD_FUNCTION(dl_handle, create_native_window_buffer_, + OH_NativeWindow_CreateNativeWindowBufferFromNativeBuffer); + LOAD_FUNCTION(dl_handle, destroy_native_window_buffer_, + OH_NativeWindow_DestroyNativeWindowBuffer); + + LOAD_FUNCTION(dl_handle, native_window_handle_options_, + OH_NativeWindow_NativeWindowHandleOpt); + LOAD_FUNCTION(dl_handle, native_window_request_buffer_, + OH_NativeWindow_NativeWindowRequestBuffer); + LOAD_FUNCTION(dl_handle, native_window_flush_buffer_, + OH_NativeWindow_NativeWindowFlushBuffer); + LOAD_FUNCTION(dl_handle, native_window_abort_buffer_, + OH_NativeWindow_NativeWindowAbortBuffer); + LOAD_FUNCTION(dl_handle, native_window_attach_buffer_, + OH_NativeWindow_NativeWindowAttachBuffer); + LOAD_FUNCTION(dl_handle, native_window_detach_buffer_, + OH_NativeWindow_NativeWindowDetachBuffer); + + return true; +} + +int32_t OHNativeWindowCompat::Acquire(void* object) { + return reference_(object); +} + +int32_t OHNativeWindowCompat::Release(void* object) { + return unreference_(object); +} + +OHNativeWindow* OHNativeWindowCompat::CreateNativeWindow(void* surface) { + return create_native_window_(surface); +} + +void OHNativeWindowCompat::DestroyNativeWindow(OHNativeWindow* window) { + destroy_native_window_(window); +} + +OHNativeWindowBuffer* OHNativeWindowCompat::CreateNativeWindowBuffer( + OHNativeBuffer* native_buffer) { + return create_native_window_buffer_(native_buffer); +} + +void OHNativeWindowCompat::DestroyNativeWindowBuffer( + OHNativeWindowBuffer* buffer) { + destroy_native_window_buffer_(buffer); +} + +int32_t OHNativeWindowCompat::NativeWindowHandleOptions(OHNativeWindow* window, + int code, + ...) 
{ + int32_t result = -1; + + va_list args; + va_start(args, code); + + switch (code) { + case SET_BUFFER_GEOMETRY: { + int32_t width = va_arg(args, int32_t); + int32_t height = va_arg(args, int32_t); + result = native_window_handle_options_(window, code, width, height); + break; + } + case GET_BUFFER_GEOMETRY: { + int32_t* height = va_arg(args, int32_t*); + int32_t* width = va_arg(args, int32_t*); + result = native_window_handle_options_(window, code, height, width); + break; + } + case SET_TIMEOUT: { + int32_t timeout = va_arg(args, int32_t); + result = native_window_handle_options_(window, code, timeout); + break; + } + case GET_TIMEOUT: { + int32_t* timeout = va_arg(args, int32_t*); + result = native_window_handle_options_(window, code, timeout); + break; + } + case GET_FORMAT: { + int32_t* format = va_arg(args, int32_t*); + result = native_window_handle_options_(window, code, format); + break; + } + case GET_USAGE: { + uint64_t* usage = va_arg(args, uint64_t*); + result = native_window_handle_options_(window, code, usage); + break; + } + case GET_BUFFERQUEUE_SIZE: { + int32_t* size = va_arg(args, int32_t*); + result = native_window_handle_options_(window, code, size); + break; + } + default: + NOTREACHED() << "Unsupported code: " << code; + break; + } + + va_end(args); + + return result; +} + +int32_t OHNativeWindowCompat::NativeWindowRequestBuffer( + OHNativeWindow* window, + OHNativeWindowBuffer** buffer, + int* fence) { + return native_window_request_buffer_(window, buffer, fence); +} + +int32_t OHNativeWindowCompat::NativeWindowFlushBuffer( + OHNativeWindow* window, + OHNativeWindowBuffer* buffer, + int fence, + Region region) { + return native_window_flush_buffer_(window, buffer, fence, region); +} + +int32_t OHNativeWindowCompat::NativeWindowAbortBuffer( + OHNativeWindow* window, + OHNativeWindowBuffer* buffer) { + return native_window_abort_buffer_(window, buffer); +} + +int32_t OHNativeWindowCompat::NativeWindowAttachBuffer( + OHNativeWindow* window, + OHNativeWindowBuffer* buffer) { + return native_window_attach_buffer_(window, buffer); +} + +int32_t OHNativeWindowCompat::NativeWindowDetachBuffer( + OHNativeWindow* window, + OHNativeWindowBuffer* buffer) { + return native_window_detach_buffer_(window, buffer); +} + +} // namespace gfx diff --git a/ui/gfx/ohos/oh_native_window_compat.h b/ui/gfx/ohos/oh_native_window_compat.h new file mode 100644 index 0000000000..a9e4bef4ee --- /dev/null +++ b/ui/gfx/ohos/oh_native_window_compat.h @@ -0,0 +1,80 @@ +// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef UI_GFX_OHOS_OH_NATIVE_WINDOW_COMPAT_H_ +#define UI_GFX_OHOS_OH_NATIVE_WINDOW_COMPAT_H_ + +#include "ui/gfx/gfx_export.h" +#include "ui/gfx/ohos/oh_native_window_abi.h" + +namespace gfx { + +// This class provides runtime support for working with OHNativeWindow and +// OHNativeWindowBuffer objects on OpenHarmony systems without requiring +// building for the OpenHarmony NDK. +class GFX_EXPORT OHNativeWindowCompat { + public: + static OHNativeWindowCompat& Get(); + + OHNativeWindowCompat(const OHNativeWindowCompat&) = delete; + OHNativeWindowCompat& operator=(const OHNativeWindowCompat&) = delete; + + // Check if the native window and native window buffer usage is supported. + // This function returns TRUE if all the required functions are loaded. + bool IsSupported(); + + // Acquire a reference on the given OHNativeObject object. 
+ int32_t Acquire(void* object); + // Remove a reference that was previously acquired. + int32_t Release(void* object); + + OHNativeWindow* CreateNativeWindow(void* surface); + void DestroyNativeWindow(OHNativeWindow* window); + + OHNativeWindowBuffer* CreateNativeWindowBuffer(OHNativeBuffer* native_buffer); + void DestroyNativeWindowBuffer(OHNativeWindowBuffer* buffer); + + int32_t NativeWindowHandleOptions(OHNativeWindow* window, int code, ...); + + int32_t NativeWindowRequestBuffer(OHNativeWindow* window, + OHNativeWindowBuffer** buffer, + int* fence); + int32_t NativeWindowFlushBuffer(OHNativeWindow* window, + OHNativeWindowBuffer* buffer, + int fence, + Region region); + int32_t NativeWindowAbortBuffer(OHNativeWindow* window, + OHNativeWindowBuffer* buffer); + int32_t NativeWindowAttachBuffer(OHNativeWindow* window, + OHNativeWindowBuffer* buffer); + int32_t NativeWindowDetachBuffer(OHNativeWindow* window, + OHNativeWindowBuffer* buffer); + + private: + OHNativeWindowCompat(); + bool LoadFunctions(); + + const bool is_supported_; + + PFOH_NativeWindow_NativeObjectReference reference_; + PFOH_NativeWindow_NativeObjectUnreference unreference_; + + PFOH_NativeWindow_CreateNativeWindow create_native_window_; + PFOH_NativeWindow_DestroyNativeWindow destroy_native_window_; + + PFOH_NativeWindow_CreateNativeWindowBufferFromNativeBuffer + create_native_window_buffer_; + PFOH_NativeWindow_DestroyNativeWindowBuffer destroy_native_window_buffer_; + + PFOH_NativeWindow_NativeWindowHandleOpt native_window_handle_options_; + PFOH_NativeWindow_NativeWindowRequestBuffer native_window_request_buffer_; + PFOH_NativeWindow_NativeWindowFlushBuffer native_window_flush_buffer_; + PFOH_NativeWindow_NativeWindowAbortBuffer native_window_abort_buffer_; + PFOH_NativeWindow_NativeWindowAttachBuffer native_window_attach_buffer_; + PFOH_NativeWindow_NativeWindowDetachBuffer native_window_detach_buffer_; +}; + +} // namespace gfx + +#endif // UI_GFX_OHOS_OH_NATIVE_WINDOW_COMPAT_H_ diff --git a/ui/gfx/ohos/scoped_native_buffer_handle.cc b/ui/gfx/ohos/scoped_native_buffer_handle.cc new file mode 100644 index 0000000000..b7d4d51828 --- /dev/null +++ b/ui/gfx/ohos/scoped_native_buffer_handle.cc @@ -0,0 +1,122 @@ +// Copyright 2024 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
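Aside for reviewers: a short sketch of how the varargs NativeWindowHandleOptions() call above is used; only the opcodes handled by the switch in oh_native_window_compat.cc are supported, and the QueryWindowGeometry helper is a hypothetical name.

#include "ui/gfx/ohos/oh_native_window_compat.h"

void QueryWindowGeometry(OHNativeWindow* window) {
  gfx::OHNativeWindowCompat& compat = gfx::OHNativeWindowCompat::Get();
  if (!compat.IsSupported()) {
    return;
  }

  // GET_BUFFER_GEOMETRY fills height first, then width, matching the
  // argument order in the compat switch statement.
  int32_t height = 0;
  int32_t width = 0;
  compat.NativeWindowHandleOptions(window, GET_BUFFER_GEOMETRY, &height,
                                   &width);

  // GET_BUFFERQUEUE_SIZE reports how many buffers the platform queue holds.
  int32_t queue_size = 0;
  compat.NativeWindowHandleOptions(window, GET_BUFFERQUEUE_SIZE, &queue_size);
}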
+ +#include "ui/gfx/ohos/scoped_native_buffer_handle.h" + +#include "base/check.h" +#include "ui/gfx/ohos/oh_native_buffer_compat.h" +#include "ui/gfx/ohos/oh_native_window_compat.h" + +namespace gfx { + +ScopedNativeBufferHandle::ScopedNativeBufferHandle() = default; + +ScopedNativeBufferHandle::ScopedNativeBufferHandle( + ScopedNativeBufferHandle&& other) { + *this = std::move(other); +} + +ScopedNativeBufferHandle::~ScopedNativeBufferHandle() { + Reset(); +} + +// static +ScopedNativeBufferHandle ScopedNativeBufferHandle::Adopt(Buffer buffer) { + DCHECK(buffer.is_valid()); + return ScopedNativeBufferHandle(std::move(buffer)); +} + +// static +ScopedNativeBufferHandle ScopedNativeBufferHandle::Create(Buffer buffer) { + DCHECK(buffer.is_valid()); + + if (buffer.native_buffer) { + OHNativeBufferCompat::Get().Acquire(buffer.native_buffer); + } else if (buffer.native_window_buffer) { + OHNativeWindowCompat::Get().Acquire(buffer.native_window_buffer); + } + + return ScopedNativeBufferHandle(std::move(buffer)); +} + +ScopedNativeBufferHandle& ScopedNativeBufferHandle::operator=( + ScopedNativeBufferHandle&& other) { + Reset(); + std::swap(buffer_, other.buffer_); + return *this; +} + +bool ScopedNativeBufferHandle::is_valid() const { + return buffer_.has_value(); +} + +const ScopedNativeBufferHandle::Buffer* ScopedNativeBufferHandle::get() const { + return buffer_.has_value() ? &(buffer_.value()) : nullptr; +} + +OHNativeBuffer* ScopedNativeBufferHandle::native_buffer() const { + return buffer_.has_value() ? buffer_.value().native_buffer : nullptr; +} + +OHNativeWindowBuffer* ScopedNativeBufferHandle::native_window_buffer() const { + return buffer_.has_value() ? buffer_.value().native_window_buffer : nullptr; +} + +void ScopedNativeBufferHandle::Reset() { + if (buffer_.has_value()) { + const auto& buffer = buffer_.value(); + + if (buffer.native_buffer) { + OHNativeBufferCompat::Get().Release(buffer.native_buffer); + } else if (buffer.native_window_buffer) { + OHNativeWindowCompat::Get().Release(buffer.native_window_buffer); + } + } + + buffer_.reset(); +} + +ScopedNativeBufferHandle::Buffer ScopedNativeBufferHandle::Take() { + Buffer buffer = buffer_.value_or(Buffer()); + buffer_.reset(); + return buffer; +} + +ScopedNativeBufferHandle ScopedNativeBufferHandle::Clone() const { + DCHECK(buffer_.has_value()); + + Buffer buffer = buffer_.value(); + + if (buffer.native_buffer) { + OHNativeBufferCompat::Get().Acquire(buffer.native_buffer); + } else if (buffer.native_window_buffer) { + OHNativeWindowCompat::Get().Acquire(buffer.native_window_buffer); + } + + return ScopedNativeBufferHandle(std::move(buffer)); +} + +ScopedNativeBufferHandle ScopedNativeBufferHandle::CloneAsClientBuffer() const { + DCHECK(buffer_.has_value()); + + Buffer buffer = buffer_.value(); + + if (buffer.native_buffer) { + buffer.native_window_buffer = + OHNativeWindowCompat::Get().CreateNativeWindowBuffer( + buffer.native_buffer); + buffer.native_buffer = nullptr; + } else if (buffer.native_window_buffer) { + OHNativeWindowCompat::Get().Acquire(buffer.native_window_buffer); + } + + DCHECK(buffer.native_window_buffer); + return ScopedNativeBufferHandle(std::move(buffer)); +} + +ScopedNativeBufferHandle::ScopedNativeBufferHandle( + absl::optional buffer) + : buffer_(std::move(buffer)) {} + +} // namespace gfx diff --git a/ui/gfx/ohos/scoped_native_buffer_handle.h b/ui/gfx/ohos/scoped_native_buffer_handle.h new file mode 100644 index 0000000000..d223cb724f --- /dev/null +++ b/ui/gfx/ohos/scoped_native_buffer_handle.h @@ -0,0 
+1,88 @@
+// Copyright 2024 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_GFX_OHOS_SCOPED_NATIVE_BUFFER_HANDLE_H_
+#define UI_GFX_OHOS_SCOPED_NATIVE_BUFFER_HANDLE_H_
+
+#include "base/memory/raw_ptr.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "ui/gfx/gfx_export.h"
+
+extern "C" {
+typedef struct OH_NativeBuffer OHNativeBuffer;
+typedef struct OHNativeWindowBuffer OHNativeWindowBuffer;
+}
+
+namespace gfx {
+
+// Owns a single reference to a native buffer object (native buffer) or a
+// native window buffer object (primary plane buffer or EGL client buffer).
+class GFX_EXPORT ScopedNativeBufferHandle {
+ public:
+  struct Buffer {
+    raw_ptr<OHNativeBuffer> native_buffer = nullptr;
+    raw_ptr<OHNativeWindowBuffer> native_window_buffer = nullptr;
+
+    bool is_valid() { return (native_buffer || native_window_buffer); }
+  };
+
+  ScopedNativeBufferHandle();
+
+  // Takes ownership of |other|'s buffer reference. Does NOT acquire a new one.
+  ScopedNativeBufferHandle(ScopedNativeBufferHandle&& other);
+
+  ScopedNativeBufferHandle(const ScopedNativeBufferHandle&) = delete;
+  ScopedNativeBufferHandle& operator=(const ScopedNativeBufferHandle&) = delete;
+
+  // Releases this handle's reference to the underlying buffer object if still
+  // valid.
+  ~ScopedNativeBufferHandle();
+
+  // Assumes ownership of an existing reference to |buffer|. This does NOT
+  // acquire a new reference.
+  static ScopedNativeBufferHandle Adopt(Buffer buffer);
+
+  // Adds a reference to |buffer| managed by this handle.
+  static ScopedNativeBufferHandle Create(Buffer buffer);
+
+  // Takes ownership of |other|'s buffer reference. Does NOT acquire a new one.
+  ScopedNativeBufferHandle& operator=(ScopedNativeBufferHandle&& other);
+
+  bool is_valid() const;
+
+  const Buffer* get() const;
+
+  OHNativeBuffer* native_buffer() const;
+  OHNativeWindowBuffer* native_window_buffer() const;
+
+  // Releases this handle's reference to the underlying buffer object if still
+  // valid. Invalidates this handle.
+  void Reset();
+
+  // Passes implicit ownership of this handle's reference over to the caller,
+  // invalidating |this|. Returns the raw buffer handle.
+  //
+  // The caller is responsible for eventually releasing this reference to the
+  // buffer object.
+  [[nodiscard]] Buffer Take();
+
+  // Creates a new handle with its own newly acquired reference to the
+  // underlying buffer object. |this| must be a valid handle.
+  ScopedNativeBufferHandle Clone() const;
+
+  // Creates a new handle with its own newly acquired reference to the
+  // underlying native window buffer object. |this| must be a valid handle.
+  ScopedNativeBufferHandle CloneAsClientBuffer() const;
+
+ private:
+  // Assumes ownership of an existing reference to |buffer|. This does NOT
+  // acquire a new reference.
+  explicit ScopedNativeBufferHandle(absl::optional<Buffer> buffer);
+
+  absl::optional<Buffer> buffer_;
+};
+
+}  // namespace gfx
+
+#endif  // UI_GFX_OHOS_SCOPED_NATIVE_BUFFER_HANDLE_H_
diff --git a/ui/gfx/ohos/scoped_oh_native_window.cc b/ui/gfx/ohos/scoped_oh_native_window.cc
new file mode 100644
index 0000000000..1cb40b801f
--- /dev/null
+++ b/ui/gfx/ohos/scoped_oh_native_window.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
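Aside for reviewers: a compact sketch of the reference-counting conventions of ScopedNativeBufferHandle above, mirroring the comments on Adopt(), Create(), Clone() and Take(); the WrapNativeWindowBuffer helper and the incoming raw pointer are illustrative assumptions.

#include <utility>

#include "ui/gfx/ohos/scoped_native_buffer_handle.h"

void WrapNativeWindowBuffer(OHNativeWindowBuffer* native_window_buffer) {
  gfx::ScopedNativeBufferHandle::Buffer buffer;
  buffer.native_window_buffer = native_window_buffer;

  // Create() acquires an extra reference, so the caller keeps its own.
  gfx::ScopedNativeBufferHandle handle =
      gfx::ScopedNativeBufferHandle::Create(std::move(buffer));

  // Clone() acquires yet another reference for the copy.
  gfx::ScopedNativeBufferHandle copy = handle.Clone();

  // Take() hands the reference over to the caller, invalidating |copy|.
  gfx::ScopedNativeBufferHandle::Buffer raw = copy.Take();

  // Adopt() wraps that existing reference without acquiring a new one; the
  // reference is released when |adopted| (and |handle|) go out of scope.
  gfx::ScopedNativeBufferHandle adopted =
      gfx::ScopedNativeBufferHandle::Adopt(std::move(raw));
}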
+
+#include "ui/gfx/ohos/scoped_oh_native_window.h"
+
+#include <utility>
+
+#include "ui/gfx/ohos/oh_native_window_compat.h"
+
+namespace gfx {
+
+// static
+ScopedOHNativeWindow ScopedOHNativeWindow::Wrap(void* window) {
+  return ScopedOHNativeWindow(reinterpret_cast<OHNativeWindow*>(window));
+}
+
+ScopedOHNativeWindow::ScopedOHNativeWindow() = default;
+
+ScopedOHNativeWindow::ScopedOHNativeWindow(OHNativeWindow* window)
+    : window_(window) {
+  if (window_) {
+    OHNativeWindowCompat::Get().Acquire(window_);
+  }
+}
+
+ScopedOHNativeWindow::~ScopedOHNativeWindow() {
+  Reset();
+}
+
+ScopedOHNativeWindow::ScopedOHNativeWindow(ScopedOHNativeWindow&& other) {
+  *this = std::move(other);
+}
+
+ScopedOHNativeWindow& ScopedOHNativeWindow::operator=(
+    ScopedOHNativeWindow&& other) {
+  Reset();
+  std::swap(window_, other.window_);
+  return *this;
+}
+
+void ScopedOHNativeWindow::Reset() {
+  if (window_) {
+    OHNativeWindowCompat::Get().Release(window_);
+  }
+  window_ = nullptr;
+}
+
+}  // namespace gfx
diff --git a/ui/gfx/ohos/scoped_oh_native_window.h b/ui/gfx/ohos/scoped_oh_native_window.h
new file mode 100644
index 0000000000..98c8783bdb
--- /dev/null
+++ b/ui/gfx/ohos/scoped_oh_native_window.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2024 Huawei Device Co., Ltd. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_GFX_OHOS_SCOPED_OH_NATIVE_WINDOW_H_
+#define UI_GFX_OHOS_SCOPED_OH_NATIVE_WINDOW_H_
+
+#include "base/memory/raw_ptr_exclusion.h"
+#include "ui/gfx/gfx_export.h"
+
+extern "C" struct OHNativeWindow;
+
+namespace gfx {
+
+// Owns a single reference to a native window object.
+class GFX_EXPORT ScopedOHNativeWindow {
+ public:
+  ScopedOHNativeWindow();
+
+  // Takes ownership of |other|'s window reference. Does NOT acquire a new one.
+  ScopedOHNativeWindow(ScopedOHNativeWindow&& other);
+  ScopedOHNativeWindow& operator=(ScopedOHNativeWindow&& other);
+
+  ScopedOHNativeWindow(const ScopedOHNativeWindow&) = delete;
+  ScopedOHNativeWindow& operator=(const ScopedOHNativeWindow&) = delete;
+
+  ~ScopedOHNativeWindow();
+
+  // Adds a reference to the |window| object.
+  static ScopedOHNativeWindow Wrap(void* window);
+
+  explicit operator bool() const { return !!window_; }
+
+  OHNativeWindow* oh_native_window() const { return window_; }
+
+ private:
+  explicit ScopedOHNativeWindow(OHNativeWindow* window);
+
+  // Releases the reference to the underlying window object if still valid.
+  void Reset();
+
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #constexpr-ctor-field-initializer, #global-scope
+  RAW_PTR_EXCLUSION OHNativeWindow* window_ = nullptr;
+};
+
+}  // namespace gfx
+
+#endif  // UI_GFX_OHOS_SCOPED_OH_NATIVE_WINDOW_H_
diff --git a/ui/gl/BUILD.gn b/ui/gl/BUILD.gn
index d2bb8a3668..61ab8f954b 100644
--- a/ui/gl/BUILD.gn
+++ b/ui/gl/BUILD.gn
@@ -135,6 +135,8 @@ component("gl") {
   if (is_ohos) {
     import("//build/config/ohos/config.gni")
     sources += [
+      "gl_surface_egl_buffer_queue_presenter.cc",
+      "gl_surface_egl_buffer_queue_presenter.h",
       "gl_surface_egl_ohos.cc",
       "gl_surface_egl_ohos.h",
       "ohos/ohos_native_image.cc",
diff --git a/ui/gl/gl_surface_egl_buffer_queue_presenter.cc b/ui/gl/gl_surface_egl_buffer_queue_presenter.cc
new file mode 100644
index 0000000000..f5d667a728
--- /dev/null
+++ b/ui/gl/gl_surface_egl_buffer_queue_presenter.cc
@@ -0,0 +1,293 @@
+// Copyright 2024 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/gl/gl_surface_egl_buffer_queue_presenter.h"
+
+#include "base/check_op.h"
+#include "base/feature_list.h"
+#include "base/functional/bind.h"
+#include "base/logging.h"
+#include "base/task/single_thread_task_runner.h"
+#include "base/task/thread_pool.h"
+#include "ui/gfx/ohos/oh_native_surface_factory.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_fence_egl.h"
+
+namespace gl {
+
+namespace {
+
+// If enabled, allows controlling surface visibility, including releasing the
+// output presenter resources while in the background.
+BASE_FEATURE(kBufferQueueSurfacePresenterVisibilityControl,
+             "BufferQueueSurfacePresenterVisibilityControl",
+             base::FEATURE_ENABLED_BY_DEFAULT);
+
+void WaitForFence(EGLDisplay display, EGLSyncKHR fence) {
+  eglClientWaitSyncKHR(display, fence, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR,
+                       EGL_FOREVER_KHR);
+  eglDestroySyncKHR(display, fence);
+}
+
+}  // namespace
+
+GLSurfaceEGLBufferQueuePresenter::GLSurfaceEGLBufferQueuePresenter(
+    gfx::OHNativeSurfaceFactory* surface_factory,
+    GLDisplayEGL* display,
+    gfx::AcceleratedWidget widget,
+    gfx::ScopedOHNativeWindow window)
+    : surface_factory_(surface_factory),
+      widget_(widget),
+      has_implicit_external_sync_(
+          display->ext->b_EGL_ARM_implicit_external_sync),
+      display_(display),
+      task_runner_(base::SingleThreadTaskRunner::GetCurrentDefault()),
+      fence_wait_task_runner_(base::ThreadPool::CreateSequencedTaskRunner(
+          {base::MayBlock(),
+           base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN})),
+      buffer_queue_(std::make_unique<gfx::NativeBufferQueue>(
+          surface_factory_->buffer_manager(),
+          std::move(window))) {
+  surface_factory_->RegisterSurface(widget_, this);
+  unsubmitted_frames_.push_back(
+      std::make_unique<PendingFrame>(next_frame_id_.GetNext()));
+}
+
+GLSurfaceEGLBufferQueuePresenter::~GLSurfaceEGLBufferQueuePresenter() {
+  surface_factory_->UnregisterSurface(widget_);
+  Destroy();
+}
+
+bool GLSurfaceEGLBufferQueuePresenter::Initialize() {
+  return buffer_queue_->Initialize();
+}
+
+void GLSurfaceEGLBufferQueuePresenter::Destroy() {
+  unsubmitted_frames_.clear();
+  buffer_queue_.reset();
+}
+
+bool GLSurfaceEGLBufferQueuePresenter::Resize(
+    const gfx::Size& size,
+    float scale_factor,
+    const gfx::ColorSpace& color_space,
+    bool has_alpha) {
+  if (size_ == size) {
+    return true;
+  }
+
+  if (!buffer_queue_->Resize(size)) {
+    return false;
+  }
+
+  size_ = size;
+
+  // Early end access to unsubmitted frame buffers (see SWAP_SKIPPED).
+  for (auto& frame : unsubmitted_frames_) {
+    if (frame->buffer) {
+      frame->buffer->EndAccess();
+      frame->buffer.reset();
+    }
+  }
+
+  last_buffer_sequence_number_.reset();
+  return true;
+}
+
+void GLSurfaceEGLBufferQueuePresenter::Present(
+    SwapCompletionCallback completion_callback,
+    PresentationCallback presentation_callback,
+    gfx::FrameData data) {
+  // If the last swap failed, don't try to schedule new ones.
+  if (last_swap_result_ == gfx::SwapResult::SWAP_FAILED) {
+    std::move(completion_callback)
+        .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_FAILED));
+    // Notify the caller that the buffer was never presented on screen.
+    std::move(presentation_callback).Run(gfx::PresentationFeedback::Failure());
+    return;
+  }
+
+  PendingFrame* frame = unsubmitted_frames_.back().get();
+
+  // A frame whose native buffer has already been scheduled must not be
+  // submitted again.
+  if (frame->buffer->sequence_number() == last_buffer_sequence_number_) {
+    std::move(completion_callback)
+        .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK));
+
+    gfx::PresentationFeedback feedback(base::TimeTicks::Now(),
+                                       base::TimeDelta(), 0 /*flags*/);
+    std::move(presentation_callback).Run(std::move(feedback));
+
+    unsubmitted_frames_.back() =
+        std::make_unique<PendingFrame>(next_frame_id_.GetNext());
+    return;
+  }
+
+  last_buffer_sequence_number_ = frame->buffer->sequence_number();
+
+  frame->local_frame = data.local_frame;
+  frame->completion_callback = std::move(completion_callback);
+  frame->presentation_callback = std::move(presentation_callback);
+  unsubmitted_frames_.push_back(
+      std::make_unique<PendingFrame>(next_frame_id_.GetNext()));
+
+  // The buffer queue supports plane gpu fences, so the buffer should be
+  // shipped with a valid gpu fence. Otherwise, wait for an EGL fence sync.
+  if ((frame->gpu_fence && !frame->gpu_fence->GetGpuFenceHandle().is_null()) ||
+      !use_egl_fence_sync_) {
+    frame->ready = true;
+    SubmitFrame();
+    return;
+  }
+
+  glFlush();
+
+  EGLSyncKHR fence = InsertFence(has_implicit_external_sync_);
+  CHECK_NE(fence, EGL_NO_SYNC_KHR) << "eglCreateSyncKHR failed";
+
+  base::OnceClosure fence_wait_task =
+      base::BindOnce(&WaitForFence, GetEGLDisplay(), fence);
+
+  base::OnceClosure fence_retired_callback =
+      base::BindOnce(&GLSurfaceEGLBufferQueuePresenter::FenceRetired,
+                     weak_factory_.GetWeakPtr(), frame->frame_id);
+
+  fence_wait_task_runner_->PostTaskAndReply(
+      FROM_HERE, std::move(fence_wait_task), std::move(fence_retired_callback));
+}
+
+bool GLSurfaceEGLBufferQueuePresenter::ScheduleOverlayPlane(
+    OverlayImage image,
+    std::unique_ptr<gfx::GpuFence> gpu_fence,
+    const gfx::OverlayPlaneData& overlay_plane_data) {
+  DCHECK(!unsubmitted_frames_.empty());
+  PendingFrame* frame = unsubmitted_frames_.back().get();
+  frame->buffer = std::move(image);
+  frame->gpu_fence = std::move(gpu_fence);
+  frame->overlay_plane_data = overlay_plane_data;
+  return true;
+}
+
+bool GLSurfaceEGLBufferQueuePresenter::SupportsPlaneGpuFences() const {
+  return true;
+}
+
+void GLSurfaceEGLBufferQueuePresenter::SetRelyOnImplicitSync() {
+  use_egl_fence_sync_ = false;
+}
+
+bool GLSurfaceEGLBufferQueuePresenter::SupportsOverrideBufferCount() const {
+  return true;
+}
+
+int GLSurfaceEGLBufferQueuePresenter::GetBufferCount() const {
+  return buffer_queue_->GetQueueSize();
+}
+
+bool GLSurfaceEGLBufferQueuePresenter::SupportsVisibilityControl() const {
+  return base::FeatureList::IsEnabled(
+      kBufferQueueSurfacePresenterVisibilityControl);
+}
+
+void GLSurfaceEGLBufferQueuePresenter::SetVisibility(bool visibility) {
+  if (visibility) {
+    // Early end access to unsubmitted frame buffers (see SWAP_SKIPPED).
+    for (auto& frame : unsubmitted_frames_) {
+      if (frame->buffer) {
+        frame->buffer->EndAccess();
+        frame->buffer.reset();
+      }
+    }
+  } else {
+    last_buffer_sequence_number_.reset();
+  }
+
+  buffer_queue_->SetVisibility(visibility);
+}
+
+scoped_refptr<gfx::NativeBuffer>
+GLSurfaceEGLBufferQueuePresenter::AllocatePrimaryPlaneBuffer(
+    const gfx::Size& size) {
+  DCHECK(task_runner_->BelongsToCurrentThread());
+  DCHECK(buffer_queue_);
+  return buffer_queue_->AllocateBuffer(size);
+}
+
+GLSurfaceEGLBufferQueuePresenter::PendingFrame::PendingFrame(uint32_t frame_id)
+    : frame_id(frame_id) {}
+
+GLSurfaceEGLBufferQueuePresenter::PendingFrame::~PendingFrame() = default;
+
+void GLSurfaceEGLBufferQueuePresenter::SubmitFrame() {
+  if (!unsubmitted_frames_.empty() && unsubmitted_frames_.front()->ready) {
+    auto frame = std::move(unsubmitted_frames_.front());
+    unsubmitted_frames_.erase(unsubmitted_frames_.begin());
+
+    if (frame->buffer) {
+      bool committed = buffer_queue_->Present(
+          std::move(frame->buffer), frame->overlay_plane_data.damage_rect,
+          std::move(frame->gpu_fence), frame->local_frame);
+      last_swap_result_ =
+          committed ? gfx::SwapResult::SWAP_ACK : gfx::SwapResult::SWAP_FAILED;
+    } else {
+      // The buffer was freed on resizing or a visibility change.
+      last_swap_result_ = gfx::SwapResult::SWAP_SKIPPED;
+    }
+
+    if (last_swap_result_ != gfx::SwapResult::SWAP_ACK) {
+      std::move(frame->completion_callback)
+          .Run(gfx::SwapCompletionResult(last_swap_result_));
+      // Notify the caller that the buffer was never presented on screen.
+      std::move(frame->presentation_callback)
+          .Run(gfx::PresentationFeedback::Failure());
+      return;
+    }
+
+    std::move(frame->completion_callback)
+        .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK));
+
+    gfx::PresentationFeedback feedback(base::TimeTicks::Now(),
+                                       base::TimeDelta(), 0 /*flags*/);
+
+    task_runner_->PostTask(
+        FROM_HERE,
+        base::BindOnce(&GLSurfaceEGLBufferQueuePresenter::OnPresentation,
+                       weak_factory_.GetWeakPtr(),
+                       std::move(frame->presentation_callback),
+                       std::move(feedback)));
+  }
+}
+
+EGLSyncKHR GLSurfaceEGLBufferQueuePresenter::InsertFence(bool implicit) {
+  const EGLint attrib_list[] = {EGL_SYNC_CONDITION_KHR,
+                                EGL_SYNC_PRIOR_COMMANDS_IMPLICIT_EXTERNAL_ARM,
+                                EGL_NONE};
+  return eglCreateSyncKHR(GetEGLDisplay(), EGL_SYNC_FENCE_KHR,
+                          implicit ? attrib_list : nullptr);
+}
+
+void GLSurfaceEGLBufferQueuePresenter::FenceRetired(uint32_t frame_id) {
+  if (unsubmitted_frames_.empty() ||
+      unsubmitted_frames_.front()->frame_id != frame_id) {
+    DCHECK(false);
+    return;
+  }
+
+  unsubmitted_frames_.front()->ready = true;
+  SubmitFrame();
+}
+
+void GLSurfaceEGLBufferQueuePresenter::OnPresentation(
+    PresentationCallback presentation_callback,
+    gfx::PresentationFeedback feedback) {
+  DCHECK(!presentation_callback.is_null());
+  std::move(presentation_callback).Run(std::move(feedback));
+}
+
+EGLDisplay GLSurfaceEGLBufferQueuePresenter::GetEGLDisplay() {
+  return display_->GetDisplay();
+}
+
+}  // namespace gl
diff --git a/ui/gl/gl_surface_egl_buffer_queue_presenter.h b/ui/gl/gl_surface_egl_buffer_queue_presenter.h
new file mode 100644
index 0000000000..defa68d021
--- /dev/null
+++ b/ui/gl/gl_surface_egl_buffer_queue_presenter.h
@@ -0,0 +1,140 @@
+// Copyright 2024 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_GL_GL_SURFACE_EGL_BUFFER_QUEUE_PRESENTER_H_
+#define UI_GL_GL_SURFACE_EGL_BUFFER_QUEUE_PRESENTER_H_
+
+#include
+
+#include <memory>
+#include <vector>
+
+#include "base/atomic_sequence_num.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gfx/frame_data.h"
+#include "ui/gfx/gpu_fence.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gfx/ohos/native_buffer_queue.h"
+#include "ui/gfx/ohos/native_buffer_queue_surface.h"
+#include "ui/gfx/ohos/scoped_oh_native_window.h"
+#include "ui/gfx/overlay_plane_data.h"
+#include "ui/gl/gl_display.h"
+#include "ui/gl/gl_export.h"
+#include "ui/gl/presenter.h"
+
+namespace base {
+class SequencedTaskRunner;
+class SingleThreadTaskRunner;
+}  // namespace base
+
+namespace gfx {
+class OHNativeSurfaceFactory;
+}  // namespace gfx
+
+namespace gl {
+
+// A presenter for the OpenHarmony platform that uses surfaceless drawing.
+// Drawing and displaying happen directly through buffer queue native buffers.
+class GL_EXPORT GLSurfaceEGLBufferQueuePresenter
+    : public Presenter,
+      public gfx::NativeBufferQueueSurface {
+ public:
+  GLSurfaceEGLBufferQueuePresenter(
+      gfx::OHNativeSurfaceFactory* surface_factory,
+      GLDisplayEGL* display,
+      gfx::AcceleratedWidget widget,
+      gfx::ScopedOHNativeWindow window);
+
+  GLSurfaceEGLBufferQueuePresenter(const GLSurfaceEGLBufferQueuePresenter&) =
+      delete;
+  GLSurfaceEGLBufferQueuePresenter& operator=(
+      const GLSurfaceEGLBufferQueuePresenter&) = delete;
+
+  bool Initialize();
+  void Destroy();
+
+  // Presenter implementation.
+  bool Resize(const gfx::Size& size,
+              float scale_factor,
+              const gfx::ColorSpace& color_space,
+              bool has_alpha) override;
+  bool ScheduleOverlayPlane(
+      OverlayImage image,
+      std::unique_ptr<gfx::GpuFence> gpu_fence,
+      const gfx::OverlayPlaneData& overlay_plane_data) override;
+  void Present(SwapCompletionCallback completion_callback,
+               PresentationCallback presentation_callback,
+               gfx::FrameData data) override;
+  bool SupportsPlaneGpuFences() const override;
+  void SetRelyOnImplicitSync() override;
+  bool SupportsOverrideBufferCount() const override;
+  int GetBufferCount() const override;
+  bool SupportsVisibilityControl() const override;
+  void SetVisibility(bool visibility) override;
+
+  // NativeBufferQueueSurface implementation.
+  scoped_refptr<gfx::NativeBuffer> AllocatePrimaryPlaneBuffer(
+      const gfx::Size& size) override;
+
+ private:
+  ~GLSurfaceEGLBufferQueuePresenter() override;
+
+  struct PendingFrame {
+    PendingFrame(uint32_t frame_id);
+    ~PendingFrame();
+
+    uint32_t frame_id;
+    bool ready = false;
+    bool local_frame = false;
+    scoped_refptr<gfx::NativeBuffer> buffer;
+    std::unique_ptr<gfx::GpuFence> gpu_fence;
+    gfx::OverlayPlaneData overlay_plane_data;
+    SwapCompletionCallback completion_callback;
+    PresentationCallback presentation_callback;
+  };
+
+  void SubmitFrame();
+
+  EGLSyncKHR InsertFence(bool implicit);
+  void FenceRetired(uint32_t frame_id);
+
+  void OnPresentation(PresentationCallback presentation_callback,
+                      gfx::PresentationFeedback feedback);
+
+  EGLDisplay GetEGLDisplay();
+
+  const raw_ptr<gfx::OHNativeSurfaceFactory> surface_factory_;
+
+  gfx::AcceleratedWidget widget_;
+
+  const bool has_implicit_external_sync_;
+  const raw_ptr<GLDisplayEGL> display_;
+
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+  // The task runner to use for fence wait tasks.
+  scoped_refptr<base::SequencedTaskRunner> fence_wait_task_runner_;
+
+  base::AtomicSequenceNumber next_frame_id_;
+
+  // PendingFrames that are waiting to be submitted. They can be either ready
+  // or waiting for fences.
+  std::vector<std::unique_ptr<PendingFrame>> unsubmitted_frames_;
+  gfx::SwapResult last_swap_result_ = gfx::SwapResult::SWAP_ACK;
+  bool use_egl_fence_sync_ = true;
+
+  gfx::Size size_;
+
+  absl::optional last_buffer_sequence_number_;
+
+  std::unique_ptr<gfx::NativeBufferQueue> buffer_queue_;
+
+  base::WeakPtrFactory<GLSurfaceEGLBufferQueuePresenter> weak_factory_{this};
+};
+
+}  // namespace gl
+
+#endif  // UI_GL_GL_SURFACE_EGL_BUFFER_QUEUE_PRESENTER_H_
diff --git a/ui/gl/presenter.h b/ui/gl/presenter.h
index b0e795fd30..e306813616 100644
--- a/ui/gl/presenter.h
+++ b/ui/gl/presenter.h
@@ -17,6 +17,10 @@
 #include "ui/gfx/swap_result.h"
 #include "ui/gl/gl_export.h"
 
+#if BUILDFLAG(IS_OHOS)
+#include "ui/gfx/ohos/native_buffer.h"
+#endif
+
 #if BUILDFLAG(IS_OZONE)
 #include "ui/gfx/native_pixmap.h"
 #endif
@@ -46,7 +50,9 @@ namespace gl {
 struct DCLayerOverlayParams;
 
 // OverlayImage is a platform specific type for overlay plane image data.
-#if BUILDFLAG(IS_OZONE)
+#if BUILDFLAG(IS_OHOS)
+using OverlayImage = scoped_refptr<gfx::NativeBuffer>;
+#elif BUILDFLAG(IS_OZONE)
 using OverlayImage = scoped_refptr<gfx::NativePixmap>;
 #elif BUILDFLAG(IS_APPLE)
 using OverlayImage = gfx::ScopedIOSurface;
@@ -150,6 +156,13 @@ class GL_EXPORT Presenter : public base::RefCounted<Presenter> {
   // Tells the presenter to rely on implicit sync when presenting buffers.
   virtual void SetRelyOnImplicitSync() {}
 
+#if BUILDFLAG(IS_OHOS)
+  virtual bool SupportsOverrideBufferCount() const = 0;
+  virtual int GetBufferCount() const = 0;
+  virtual bool SupportsVisibilityControl() const = 0;
+  virtual void SetVisibility(bool visibility) {}
+#endif
+
  protected:
   friend class base::RefCounted<Presenter>;
   virtual ~Presenter();
-- 
Gitee
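
Usage note (illustrative sketch, not part of the patch): the snippet below shows how the reference-counting helpers added in ui/gfx/ohos/scoped_native_buffer_handle.h are intended to be used. It assumes the OHOS compat wrappers introduced by this series are available; the function name and the premise that |raw_buffer| already carries one caller-owned reference are assumptions made for the example.

  #include "ui/gfx/ohos/scoped_native_buffer_handle.h"

  // |raw_buffer| is assumed to already hold one reference owned by the caller.
  void UseBufferHandle(OHNativeBuffer* raw_buffer) {
    gfx::ScopedNativeBufferHandle::Buffer buffer;
    buffer.native_buffer = raw_buffer;

    // Adopt() takes over the caller's existing reference; no extra Acquire().
    gfx::ScopedNativeBufferHandle owned =
        gfx::ScopedNativeBufferHandle::Adopt(buffer);

    // Clone() acquires an additional reference that is released independently.
    gfx::ScopedNativeBufferHandle copy = owned.Clone();

    // CloneAsClientBuffer() returns a handle wrapping a native window buffer
    // created from the native buffer, usable as an EGL client buffer.
    gfx::ScopedNativeBufferHandle client = owned.CloneAsClientBuffer();

    // Each handle releases its own reference when it goes out of scope.
  }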