// GpuUtils.hpp
1#pragma once
2
3#include <avnd/introspection/gfx.hpp>
4#if SCORE_PLUGIN_GFX
5#include <Process/ExecutionContext.hpp>
6
7#include <Crousti/File.hpp>
8#include <Crousti/GppCoroutines.hpp>
9#include <Crousti/GppShaders.hpp>
10#include <Crousti/MessageBus.hpp>
11#include <Crousti/TextureConversion.hpp>
12#include <Crousti/TextureFormat.hpp>
13#include <Gfx/GfxExecNode.hpp>
14#include <Gfx/Graph/Node.hpp>
15#include <Gfx/Graph/OutputNode.hpp>
16#include <Gfx/Graph/RenderList.hpp>
17#include <Gfx/Graph/RenderState.hpp>
18
19#include <score/tools/ThreadPool.hpp>
20
21#include <ossia/detail/small_flat_map.hpp>
22
23#include <ossia-qt/invoke.hpp>
24
25#include <QCoreApplication>
26#include <QTimer>
27#include <QtGui/private/qrhi_p.h>
28
29#include <avnd/binding/ossia/metadatas.hpp>
30#include <avnd/binding/ossia/port_run_postprocess.hpp>
31#include <avnd/binding/ossia/port_run_preprocess.hpp>
32#include <avnd/binding/ossia/soundfiles.hpp>
33#include <avnd/concepts/parameter.hpp>
34#include <avnd/introspection/input.hpp>
35#include <avnd/introspection/output.hpp>
36#include <fmt/format.h>
37#include <gpp/layout.hpp>
38
39#include <score_plugin_avnd_export.h>
40
41namespace oscr
42{
struct GpuWorker
{
  // Wires up the avnd "worker" protocol on a processor state:
  // state->worker.request(args...) posts the work to the global score
  // thread pool. If worker_type::work returns a callable, that callable
  // is re-dispatched to the main (Qt) thread and invoked with the state.
  template <typename T>
  void initWorker(this auto& self, std::shared_ptr<T>& state) noexcept
  {
    if constexpr(avnd::has_worker<T>)
    {
      // QPointer lets the posted tasks detect that `self` was destroyed
      // between posting and execution.
      auto ptr = QPointer{&self};
      auto& tq = score::TaskPool::instance();
      using worker_type = decltype(state->worker);

      // weak_ptr: a pending task must not keep the processor state alive.
      auto wk_state = std::weak_ptr{state};
      state->worker.request = [ptr, &tq, wk_state]<typename... Args>(Args&&... f) {
        using type_of_result = decltype(worker_type::work(std::forward<Args>(f)...));
        // Pack-capture the arguments by value so they survive until the
        // pool thread runs the task.
        tq.post([... ff = std::forward<Args>(f), wk_state, ptr]() mutable {
          if constexpr(std::is_void_v<type_of_result>)
          {
            worker_type::work(std::forward<decltype(ff)>(ff)...);
          }
          else
          {
            // If the worker returns a std::function, it
            // is to be invoked back in the processor DSP thread
            auto res = worker_type::work(std::forward<decltype(ff)>(ff)...);
            if(!res || !ptr)
              return;

            // Hop back to the Qt event loop; re-check liveness there.
            ossia::qt::run_async(
                QCoreApplication::instance(),
                [res = std::move(res), wk_state, ptr]() mutable {
              if(ptr)
                if(auto state = wk_state.lock())
                  res(*state);
            });
          }
        });
      };
    }
  }
};
83
// Visitor applied to every input port of a GPU node when a
// score::gfx::Message arrives: writes control values, (re)loads raw files,
// and forwards texture / buffer render-target specs to the node.
template <typename GpuNodeRenderer, typename Node>
struct GpuProcessIns
{
  GpuNodeRenderer& gpu;
  Node& state;
  const score::gfx::Message& prev_mess; // previous message, used for deduplication
  const score::gfx::Message& mess;      // message currently being applied
  const score::DocumentContext& ctx;

  // True if input N exists in the message and actually changed since the
  // previous message (index() == 1 is the ossia::value alternative; equal
  // consecutive values are skipped).
  bool can_process_message(std::size_t N)
  {
    if(mess.input.size() <= N)
      return false;

    if(prev_mess.input.size() == mess.input.size())
    {
      auto& prev = prev_mess.input[N];
      auto& next = mess.input[N];
      if(prev.index() == 1 && next.index() == 1)
      {
        if(ossia::get<ossia::value>(prev) == ossia::get<ossia::value>(next))
        {
          return false;
        }
      }
    }
    return true;
  }

  // Control / parameter ports: convert the ossia::value into the port's
  // value member and notify the processor if it has an update() hook.
  void operator()(avnd::parameter_port auto& t, auto field_index)
  {
    if(!can_process_message(field_index))
      return;

    if(auto val = ossia::get_if<ossia::value>(&mess.input[field_index]))
    {
      oscr::from_ossia_value(t, *val, t.value);
      if_possible(t.update(state));
    }
  }

#if OSCR_HAS_MMAP_FILE_STORAGE
  // Raw file ports: resolve the value to a file, load it (text / mmap
  // according to the port's declared flags) and hand it to the node.
  template <avnd::raw_file_port Field, std::size_t NField>
  void operator()(Field& t, avnd::field_index<NField> field_index)
  {
    // FIXME we should be loading a file there
    using node_type = std::remove_cvref_t<decltype(gpu.node())>;
    using file_ports = avnd::raw_file_input_introspection<Node>;

    if(!can_process_message(field_index))
      return;

    auto val = ossia::get_if<ossia::value>(&mess.input[field_index]);
    if(!val)
      return;

    static constexpr bool has_text = requires { decltype(Field::file)::text; };
    static constexpr bool has_mmap = requires { decltype(Field::file)::mmap; };

    // First we can load it directly since execution hasn't started yet
    if(auto hdl = loadRawfile(*val, ctx, has_text, has_mmap))
    {
      // Map the field index to the index among raw-file ports only.
      static constexpr auto N = file_ports::field_index_to_index(NField);
      if constexpr(avnd::port_can_process<Field>)
      {
        // FIXME also do it when we get a run-time message from the exec engine,
        // OSC, etc
        auto func = executePortPreprocess<Field>(*hdl);
        const_cast<node_type&>(gpu.node())
            .file_loaded(
                state, hdl, avnd::predicate_index<N>{}, avnd::field_index<NField>{});
        if(func)
          func(state);
      }
      else
      {
        const_cast<node_type&>(gpu.node())
            .file_loaded(
                state, hdl, avnd::predicate_index<N>{}, avnd::field_index<NField>{});
      }
    }
  }
#endif

  // Buffer ports: forward the render-target spec to the node.
  template <avnd::buffer_port Field, std::size_t NField>
  void operator()(Field& t, avnd::field_index<NField> field_index)
  {
    using node_type = std::remove_cvref_t<decltype(gpu.node())>;
    auto& node = const_cast<node_type&>(gpu.node());
    auto val = ossia::get_if<ossia::render_target_spec>(&mess.input[field_index]);
    if(!val)
      return;
    node.process(NField, *val);
  }

  // Texture ports: same handling as buffer ports.
  template <avnd::texture_port Field, std::size_t NField>
  void operator()(Field& t, avnd::field_index<NField> field_index)
  {
    using node_type = std::remove_cvref_t<decltype(gpu.node())>;
    auto& node = const_cast<node_type&>(gpu.node());
    auto val = ossia::get_if<ossia::render_target_spec>(&mess.input[field_index]);
    if(!val)
      return;
    node.process(NField, *val);
  }

  // Geometry ports: not implemented yet.
  template <avnd::geometry_port Field, std::size_t NField>
  void operator()(Field& t, avnd::field_index<NField> field_index)
  {
    using node_type = std::remove_cvref_t<decltype(gpu.node())>;
    auto& node = const_cast<node_type&>(gpu.node());

    // FIXME
  }

  // Any other port kind reaching this visitor is a programming error.
  void operator()(auto& t, auto field_index) = delete;
};
201
struct GpuControlIns
{
  // Applies an incoming score::gfx::Message to all input ports of `state`
  // through the GpuProcessIns visitor, then remembers the message so the
  // next call can skip unchanged values.
  template <typename Self, typename Node_T>
  static void processControlIn(
      Self& self, Node_T& state, score::gfx::Message& renderer_mess,
      const score::gfx::Message& mess, const score::DocumentContext& ctx) noexcept
  {
    // Apply the controls
    avnd::input_introspection<Node_T>::for_all_n(
        avnd::get_inputs<Node_T>(state),
        GpuProcessIns<Self, Node_T>{self, state, renderer_mess, mess, ctx});
    renderer_mess = mess;
  }
};
216
struct GpuControlOuts
{
  // Command queue towards the execution engine; weak so a dangling GPU
  // node cannot keep the execution context alive.
  std::weak_ptr<Execution::ExecutionCommandQueue> queue;
  Gfx::exec_controls control_outs;

  // Instance id of this processor (used e.g. for prepare()).
  int64_t instance{};

  // Pushes the current value of every parameter output port into the
  // execution command queue, which applies them on the execution side.
  template <typename Node_T>
  void processControlOut(Node_T& state) const noexcept
  {
    if(!this->control_outs.empty())
    {
      auto q = this->queue.lock();
      if(!q)
        return;
      auto& qq = *q;
      // parm_k indexes control_outs in parameter-output declaration order.
      int parm_k = 0;
      avnd::parameter_output_introspection<Node_T>::for_all(
          avnd::get_outputs(state), [&]<avnd::parameter_port T>(const T& t) {
        // Convert here, swap on the execution side when the command runs.
        qq.enqueue([v = oscr::to_ossia_value(t, t.value),
                    port = control_outs[parm_k]]() mutable {
          std::swap(port->value, v);
          port->changed = true;
        });

        parm_k++;
      });
    }
  }
};
247
// Per-processor storage for the various kinds of file resources a
// processor T may declare (sound files, MIDI files, raw files).
template <typename T>
struct SCORE_PLUGIN_AVND_EXPORT GpuNodeElements
{
  [[no_unique_address]] oscr::soundfile_storage<T> soundfiles;

  [[no_unique_address]] oscr::midifile_storage<T> midifiles;

#if defined(OSCR_HAS_MMAP_FILE_STORAGE)
  [[no_unique_address]] oscr::raw_file_storage<T> rawfiles;
#endif

  // Called by GpuProcessIns when a raw file finished loading:
  // stores the handle into the matching raw-file slot of `state`.
  template <std::size_t N, std::size_t NField>
  void file_loaded(
      auto& state, const std::shared_ptr<oscr::raw_file_data>& hdl,
      avnd::predicate_index<N>, avnd::field_index<NField>)
  {
    this->rawfiles.load(
        state, hdl, avnd::predicate_index<N>{}, avnd::field_index<NField>{});
  }
};
268
// Base class for CPU-side custom graphics nodes: stores the document
// context and the last control message received.
struct SCORE_PLUGIN_AVND_EXPORT CustomGfxNodeBase : score::gfx::NodeModel
{
  explicit CustomGfxNodeBase(const score::DocumentContext& ctx)
      : score::gfx::NodeModel{}
      , m_ctx{ctx}
  {
  }
  virtual ~CustomGfxNodeBase();
  const score::DocumentContext& m_ctx;
  score::gfx::Message last_message;
  void process(score::gfx::Message&& msg) override;
};
// Base class for CPU-side custom graphics output nodes.
struct SCORE_PLUGIN_AVND_EXPORT CustomGfxOutputNodeBase : score::gfx::OutputNode
{
  virtual ~CustomGfxOutputNodeBase();

  score::gfx::Message last_message;
  void process(score::gfx::Message&& msg) override;
};
289struct CustomGpuNodeBase
291 , GpuWorker
292 , GpuControlIns
293 , GpuControlOuts
294{
295 CustomGpuNodeBase(
296 std::weak_ptr<Execution::ExecutionCommandQueue>&& q, Gfx::exec_controls&& ctls,
297 const score::DocumentContext& ctx)
298 : GpuControlOuts{std::move(q), std::move(ctls)}
299 , m_ctx{ctx}
300 {
301 }
302
303 virtual ~CustomGpuNodeBase() = default;
304
305 const score::DocumentContext& m_ctx;
306 QString vertex, fragment, compute;
307 score::gfx::Message last_message;
308 void process(score::gfx::Message&& msg) override;
309};
310
311struct SCORE_PLUGIN_AVND_EXPORT CustomGpuOutputNodeBase
313 , GpuWorker
314 , GpuControlIns
315 , GpuControlOuts
316{
317 CustomGpuOutputNodeBase(
318 std::weak_ptr<Execution::ExecutionCommandQueue> q, Gfx::exec_controls&& ctls,
319 const score::DocumentContext& ctx);
320 virtual ~CustomGpuOutputNodeBase();
321
322 const score::DocumentContext& m_ctx;
323 std::weak_ptr<score::gfx::RenderList> m_renderer{};
324 std::shared_ptr<score::gfx::RenderState> m_renderState{};
325
326 QString vertex, fragment, compute;
327 score::gfx::Message last_message;
328 void process(score::gfx::Message&& msg) override;
330
331 void setRenderer(std::shared_ptr<score::gfx::RenderList>) override;
332 score::gfx::RenderList* renderer() const override;
333
334 void startRendering() override;
335 void render() override;
336 void stopRendering() override;
337 bool canRender() const override;
338 void onRendererChange() override;
339
340 void createOutput(score::gfx::OutputConfiguration) override;
341
342 void destroyOutput() override;
343 std::shared_ptr<score::gfx::RenderState> renderState() const override;
344
345 Configuration configuration() const noexcept override;
346};
347
// One-time initialization of a freshly created processor state:
// worker hookup, processor->GUI message bus, control init, prepare().
template <typename Node_T, typename Node>
void prepareNewState(std::shared_ptr<Node_T>& eff, const Node& parent)
{
  if constexpr(avnd::has_worker<Node_T>)
  {
    parent.initWorker(eff);
  }
  if constexpr(avnd::has_processor_to_gui_bus<Node_T>)
  {
    auto& process = parent.processModel;
    // QPointer guards against the process model being deleted while a
    // message is still in flight.
    eff->send_message = [ptr = QPointer{&process}](auto&& b) mutable {
      // FIXME right now all the rendering is done in the UI thread, which is very MEH
      // this->in_edit([&process, bb = std::move(b)]() mutable {

      if(ptr && ptr->to_ui)
        MessageBusSender{ptr->to_ui}(std::move(b));
      // });
    };

    // FIXME GUI -> engine. See executor.hpp
  }

  avnd::init_controls(*eff);

  if constexpr(avnd::can_prepare<Node_T>)
  {
    // prepare() either takes a setup struct (filled with the instance id
    // when the struct has such a member) or no argument at all.
    if constexpr(avnd::function_reflection<&Node_T::prepare>::count == 1)
    {
      using prepare_type = avnd::first_argument<&Node_T::prepare>;
      prepare_type t;
      if_possible(t.instance = parent.instance);
      eff->prepare(t);
    }
    else
    {
      eff->prepare();
    }
  }
}
387
// Maps an avnd port type (through its reflection) to the matching
// score::gfx::Types enumerator. Overload selection relies on concept
// subsumption: more specific port concepts win over the generic
// parameter_port / fallback overloads.
struct port_to_type_enum
{
  template <std::size_t I, avnd::buffer_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Buffer;
  }

  // CPU textures with a known (fixed or dynamic) pixel format are images;
  // anything else is treated as a raw buffer.
  template <std::size_t I, avnd::cpu_texture_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    using texture_type = std::remove_cvref_t<decltype(F::texture)>;
    return (avnd::cpu_fixed_format_texture<texture_type> || avnd::cpu_dynamic_format_texture<texture_type>)
               ? score::gfx::Types::Image
               : score::gfx::Types::Buffer;
  }

  template <std::size_t I, avnd::gpu_texture_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Image;
  }

  template <std::size_t I, avnd::sampler_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Image;
  }
  template <std::size_t I, avnd::image_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Image;
  }
  template <std::size_t I, avnd::attachment_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Image;
  }

  template <std::size_t I, avnd::geometry_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Geometry;
  }
  template <std::size_t I, avnd::mono_audio_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Audio;
  }
  template <std::size_t I, avnd::poly_audio_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Audio;
  }
  template <std::size_t I, avnd::int_parameter F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Int;
  }
  // Enums are transported as their integer index.
  template <std::size_t I, avnd::enum_parameter F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Int;
  }
  template <std::size_t I, avnd::float_parameter F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Float;
  }
  // Generic parameters: aggregates of 2/3/4 members map to vec2/3/4.
  template <std::size_t I, avnd::parameter_port F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    using value_type = std::remove_cvref_t<decltype(F::value)>;

    if constexpr(std::is_aggregate_v<value_type>)
    {
      constexpr int sz = boost::pfr::tuple_size_v<value_type>;
      if constexpr(sz == 2)
      {
        return score::gfx::Types::Vec2;
      }
      else if constexpr(sz == 3)
      {
        return score::gfx::Types::Vec3;
      }
      else if constexpr(sz == 4)
      {
        return score::gfx::Types::Vec4;
      }
    }
    return score::gfx::Types::Empty;
  }
  // Fallback for port kinds with no gfx representation.
  template <std::size_t I, typename F>
  constexpr auto operator()(avnd::field_reflection<I, F> p)
  {
    return score::gfx::Types::Empty;
  }
};
486
487template <typename Node_T>
488inline void initGfxPorts(auto* self, auto& input, auto& output)
489{
490 avnd::input_introspection<Node_T>::for_all(
491 [self, &input]<typename Field, std::size_t I>(avnd::field_reflection<I, Field> f) {
492 static constexpr auto type = port_to_type_enum{}(f);
493 input.push_back(new score::gfx::Port{self, {}, type, {}, {}});
494 });
495 avnd::output_introspection<Node_T>::for_all(
496 [self,
497 &output]<typename Field, std::size_t I>(avnd::field_reflection<I, Field> f) {
498 static constexpr auto type = port_to_type_enum{}(f);
499 output.push_back(new score::gfx::Port{self, {}, type, {}, {}});
500 });
501}
502
503static score::gfx::BufferView getInputBuffer(
504 score::gfx::RenderList& renderer, const score::gfx::Node& parent, int port_index)
505{
506 const auto& inputs = parent.input;
507 // SCORE_ASSERT(port_index == 0);
508 {
509 score::gfx::Port* p = inputs[port_index];
510 for(auto& edge : p->edges)
511 {
512 auto src_node = edge->source->node;
513 score::gfx::NodeRenderer* src_renderer = src_node->renderedNodes.at(&renderer);
514 if(src_renderer)
515 {
516 return src_renderer->bufferForOutput(*edge->source);
517 }
518 break;
519 }
520 }
521 return {};
522}
523
524
525static void readbackInputBuffer(
526 score::gfx::RenderList& renderer
527 , QRhiResourceUpdateBatch& res
528 , const score::gfx::Node& parent
529 , QRhiBufferReadbackResult& readback
530 , int port_index
531 )
532{
533 // FIXME: instead of doing this we could do the readback in the
534 // producer node and just read its bytearray once...
535 if(auto buf = getInputBuffer(renderer, parent, port_index))
536 {
537 readback = {};
538 res.readBackBuffer(buf.handle, buf.byte_offset, buf.byte_size, &readback);
539 }
540}
541
542static void recreateOutputBuffer(
543 score::gfx::RenderList& renderer, avnd::cpu_buffer auto& cpu_buf,
544 QRhiResourceUpdateBatch& res, score::gfx::BufferView& buf)
545{
546 const auto bytesize = avnd::get_bytesize(cpu_buf);
547 if(!buf.handle)
548 {
549 if(bytesize > 0)
550 {
551 buf.handle = renderer.state.rhi->newBuffer(
552 QRhiBuffer::Static, QRhiBuffer::StorageBuffer | QRhiBuffer::VertexBuffer,
553 bytesize);
554 buf.handle->setName("GpuUtils::recreateOutputBuffer");
555 buf.byte_offset = 0;
556 buf.byte_size = bytesize;
557
558 buf.handle->create();
559 }
560 else
561 {
562 cpu_buf.changed = false;
563 return;
564 }
565 }
566 else if(buf.handle->size() != bytesize)
567 {
568 buf.handle->destroy();
569 buf.handle->setSize(bytesize);
570 buf.handle->create();
571 buf.byte_size = bytesize;
572 }
573}
574
575static void uploadOutputBuffer(
576 score::gfx::RenderList& renderer, avnd::cpu_buffer auto& cpu_buf,
577 QRhiResourceUpdateBatch& res, score::gfx::BufferView& rhi_buf)
578{
579 if(cpu_buf.changed)
580 {
581 recreateOutputBuffer(renderer, cpu_buf, res, rhi_buf);
583 &res, rhi_buf.handle, 0, cpu_buf.byte_size,
584 (const char*)avnd::get_bytes(cpu_buf));
585 cpu_buf.changed = false;
586 }
587}
588
589static void uploadOutputBuffer(
590 score::gfx::RenderList& renderer, avnd::gpu_buffer auto& gpu_buf,
591 QRhiResourceUpdateBatch& res, score::gfx::BufferView& rhi_buf)
592{
593 rhi_buf.handle = reinterpret_cast<QRhiBuffer*>(gpu_buf.handle);
594 rhi_buf.byte_size = gpu_buf.byte_size;
595 rhi_buf.byte_offset = gpu_buf.byte_offset;
596}
597
598template <typename T>
599struct geometry_inputs_storage;
600
// Per-mesh GPU resources for one geometry input: one RHI buffer per
// attribute buffer, plus the matching readback slots for gpu->cpu paths.
struct mesh_input_storage
{
  std::vector<QRhiBufferReadbackResult> readbacks;
  std::vector<QRhiBuffer*> buffers;
};
// State of one geometry input port: the current geometry spec and the
// per-mesh buffer / readback storage.
struct geometry_input_storage
{
  ossia::geometry_spec spec;
  std::vector<mesh_input_storage> meshes;
};
611
// Storage / plumbing for processors that declare at least one geometry
// input: owns the per-port RHI buffers and readbacks, uploads CPU meshes,
// reads back GPU meshes, and feeds the results into the processor's ports.
template <typename T>
  requires(avnd::geometry_input_introspection<T>::size > 0)
struct geometry_inputs_storage<T>
{
  // FIXME in Gfx/Graph/NodeRenderer.hpp
  static_assert(avnd::geometry_input_introspection<T>::size == 1);

  geometry_input_storage inputs[avnd::geometry_input_introspection<T>::size];
  // Buffers we allocated ourselves (to release in release()).
  ossia::small_vector<QRhiBuffer*, 4> allocated;

  // Copies the results of the readbacks requested in inputAboutToFinish()
  // into the processor's geometry ports.
  void readInputGeometries(
      score::gfx::RenderList& renderer, const ossia::geometry_spec& spec, auto& parent,
      auto& state)
  {
    // Copy the readback output inside the structure
    // TODO it would be much better to do this inside the readback's
    // "completed" callback.
    avnd::geometry_input_introspection<T>::for_all_n(
        avnd::get_inputs<T>(state),
        [&]<typename Field, std::size_t N>(Field& t, avnd::predicate_index<N> np) {
      this->inputs[N].spec = spec; // FIXME multiple geometry input ports
      this->inputs[N].meshes.resize(1); // FIXME

      // Here we fetch the readbacks results
      auto& meshes = this->inputs[N].meshes[0];

      oscr::meshes_from_ossia(
          spec.meshes, t.mesh,
          [&](auto& write_buf, int buffer_index, void* data, int64_t bytesize) {
        // CPU input geometry, upload was done before
        SCORE_ASSERT(buffer_index >= 0);
        if(buffer_index < meshes.readbacks.size())
        {
          QRhiBuffer* handle = meshes.buffers[buffer_index];
          write_buf.handle = handle;
          write_buf.byte_size = handle->size();
        }
      }, [&](auto& write_buf, int buffer_index, void* handle) {
        // GPU input buffer, CPU output buffer: need to fetch our readback
        SCORE_ASSERT(buffer_index >= 0);
        if(buffer_index < meshes.readbacks.size())
        {
          // FIXME investigate why runInitialPasses is called before inputAboutToFinish
          auto& readback = meshes.readbacks[buffer_index].data;
          write_buf.raw_data = reinterpret_cast<unsigned char*>(readback.data());
          write_buf.byte_size = readback.size();
        }
      });
    });
  }

  // Called before the frame finishes: uploads CPU-side mesh data to the
  // GPU and schedules readbacks for GPU-side buffers the processor reads.
  void inputAboutToFinish(
      score::gfx::RenderList& renderer, QRhiResourceUpdateBatch*& res,
      const ossia::geometry_spec& spec, auto& state, auto& parent)
  {
    avnd::geometry_input_introspection<T>::for_all_n2(
        avnd::get_inputs<T>(state),
        [&]<typename Field, std::size_t N, std::size_t NField>(
            Field& t, avnd::predicate_index<N> np, avnd::field_index<NField> nf) {
      this->inputs[N].spec = spec; // FIXME multiple geometry input ports
      this->inputs[N].meshes.resize(1); // FIXME
      // Here we request readbacks if necessary

      auto& meshes = this->inputs[N].meshes[0];
      oscr::meshes_from_ossia(
          spec.meshes, t.mesh,
          [&](auto& write_buf, int buffer_index, void* data, int64_t bytesize) {
        // cpu -> gpu
        if(meshes.buffers.size() <= buffer_index)
        {
          // First time this buffer index is seen: allocate the RHI buffer.
          meshes.buffers.resize(buffer_index + 1);
          meshes.readbacks.resize(buffer_index + 1);

          auto buf = renderer.state.rhi->newBuffer(
              QRhiBuffer::Static, QRhiBuffer::StorageBuffer | QRhiBuffer::VertexBuffer,
              bytesize);
          buf->setName(oscr::getUtf8Name<T>() + "::" + oscr::getUtf8Name(t));
          buf->create();
          allocated.push_back(buf);
          meshes.buffers[buffer_index] = buf;
        }

        res->uploadStaticBuffer(meshes.buffers[buffer_index], 0, bytesize, data);
      }, [&](auto& write_buf, int buffer_index, void* handle) {
        // gpu -> cpu
        if(meshes.readbacks.size() <= buffer_index)
        {
          meshes.buffers.resize(buffer_index + 1);
          meshes.readbacks.resize(buffer_index + 1);
        }

        meshes.readbacks[buffer_index] = {};
        if(auto buf = static_cast<QRhiBuffer*>(handle))
        {
          meshes.buffers[buffer_index] = buf;
          res->readBackBuffer(buf, 0, buf->size(), &meshes.readbacks[buffer_index]);
        }
        else
        {
          meshes.buffers[buffer_index] = {};
          meshes.readbacks[buffer_index] = {};
        }
      });
    });
  }

  // Releases all buffers this storage allocated (not the borrowed ones).
  void release(score::gfx::RenderList& renderer)
  {
    for(auto& buf : allocated)
      renderer.releaseBuffer(buf);
    allocated.clear();
  }
};
725
726template <typename T>
727 requires(avnd::geometry_input_introspection<T>::size == 0)
728struct geometry_inputs_storage<T>
729{
730 static void readInputBuffers(auto&&...) { }
731
732 static void inputAboutToFinish(auto&&...) { }
733};
734
735template<typename T>
736struct buffer_inputs_storage;
737
// Storage / plumbing for processors with at least one buffer input:
// CPU buffer ports are filled from GPU readbacks, GPU buffer ports are
// aliased directly to the producer's RHI buffer.
template<typename T>
  requires (avnd::buffer_input_introspection<T>::size > 0)
struct buffer_inputs_storage<T>
{
  // +1 because of zero-array-size unsupported
  QRhiBufferReadbackResult
      m_readbacks[avnd::cpu_buffer_input_introspection<T>::size + 1];
  score::gfx::BufferView m_gpubufs[avnd::gpu_buffer_input_introspection<T>::size + 1];

  // Publishes the completed readbacks / resolved GPU buffers into the
  // processor's buffer input ports before it runs.
  void readInputBuffers(
      score::gfx::RenderList& renderer, auto& parent, auto& state)
  {
    if constexpr(avnd::cpu_buffer_input_introspection<T>::size > 0)
    {
      // Copy the readback output inside the structure
      // TODO it would be much better to do this inside the readback's
      // "completed" callback.
      avnd::cpu_buffer_input_introspection<T>::for_all_n(
          avnd::get_inputs<T>(state),
          [&]<typename Field, std::size_t N>
          (Field& t, avnd::predicate_index<N> np)
      {
        auto& readback = m_readbacks[N].data;
        t.buffer.raw_data = reinterpret_cast<unsigned char*>(readback.data());
        t.buffer.byte_size = readback.size();
        t.buffer.byte_offset = 0; // FIXME
        t.buffer.changed = true;
      });
    }

    if constexpr(avnd::gpu_buffer_input_introspection<T>::size > 0)
    {
      // Copy the readback output inside the structure
      // TODO it would be much better to do this inside the readback's
      // "completed" callback.
      avnd::gpu_buffer_input_introspection<T>::for_all_n2(
          avnd::get_inputs<T>(state),
          [&]<typename Field, std::size_t N, std::size_t NField>(
              Field& t, avnd::predicate_index<N> np, avnd::field_index<NField> nf) {
        // Resolve lazily: only look the buffer up if we don't have it yet.
        score::gfx::BufferView& buf = m_gpubufs[N];
        if(!buf)
          buf = getInputBuffer(renderer, parent, nf);
        if(!buf)
          return;
        t.buffer.handle = buf.handle;
        t.buffer.byte_size = buf.byte_size;
        t.buffer.byte_offset = buf.byte_offset;
        // t.buffer.changed = true; FIXME
      });
    }
  }

  // Called before the frame finishes: schedules readbacks for CPU buffer
  // ports and re-resolves the GPU buffer views for the next frame.
  void inputAboutToFinish(
      score::gfx::RenderList& renderer,
      QRhiResourceUpdateBatch*& res,
      auto& state,
      auto& parent)
  {
    avnd::cpu_buffer_input_introspection<T>::for_all_n2(
        avnd::get_inputs<T>(state),
        [&]<typename Field, std::size_t N, std::size_t NField>
        (Field& port, avnd::predicate_index<N> np, avnd::field_index<NField> nf) {
      readbackInputBuffer(renderer, *res, parent, m_readbacks[N], nf);
    });
    avnd::gpu_buffer_input_introspection<T>::for_all_n2(
        avnd::get_inputs<T>(state),
        [&]<typename Field, std::size_t N, std::size_t NField>
        (Field& port, avnd::predicate_index<N> np, avnd::field_index<NField> nf) {
      m_gpubufs[N] = getInputBuffer(renderer, parent, nf);
    });
  }
};
810
811template<typename T>
812 requires (avnd::buffer_input_introspection<T>::size == 0)
813struct buffer_inputs_storage<T>
814{
815 static void readInputBuffers(auto&&...)
816 {
817
818 }
819
820 static void inputAboutToFinish(auto&&...)
821 {
822
823 }
824};
825
// A BufferView plus an ownership flag: owned buffers were allocated by
// this node and must be released; non-owned ones belong to the processor.
struct MaybeOwnedBuffer : score::gfx::BufferView
{
  bool owned{false};
};
830
831template<typename T>
832struct buffer_outputs_storage;
833
834template<typename T>
835 requires (avnd::buffer_output_introspection<T>::size > 0)
836struct buffer_outputs_storage<T>
837{
838 std::pair<const score::gfx::Port*, MaybeOwnedBuffer>
839 m_buffers[avnd::buffer_output_introspection<T>::size];
840
841 QRhiResourceUpdateBatch* currentResourceUpdateBatch{};
842
843 template <typename Field, std::size_t N, std::size_t NField>
844 requires avnd::cpu_buffer<std::decay_t<decltype(Field::buffer)>>
845 void createOutput(
846 score::gfx::RenderList& renderer, auto& parent, Field& port,
847 avnd::predicate_index<N> np, avnd::field_index<NField> nf)
848 {
849 auto& [gfx_port, buf] = m_buffers[N];
850 gfx_port = parent.output[nf];
851 buf.handle = renderer.state.rhi->newBuffer(
852 QRhiBuffer::Static, QRhiBuffer::StorageBuffer | QRhiBuffer::VertexBuffer, 1);
853 buf.handle->setName(oscr::getUtf8Name<T>() + "::" + oscr::getUtf8Name(port));
854 buf.byte_offset = 0;
855 buf.byte_size = 1;
856 buf.owned = true;
857
858 buf.handle->create();
859
860 port.buffer.upload
861 = [this, &renderer, &port](const char* data, int64_t offset, int64_t bytesize) {
862 // FIXME is offset and bytesize relative to the input or the output data ?
863 SCORE_ASSERT(currentResourceUpdateBatch);
864 auto& [gfx_port, buf] = m_buffers[N];
865
866 if(!buf.handle)
867 {
868 if(bytesize > 0)
869 {
870 buf.handle = renderer.state.rhi->newBuffer(
871 QRhiBuffer::Static, QRhiBuffer::StorageBuffer | QRhiBuffer::VertexBuffer,
872 bytesize);
873 buf.handle->setName(oscr::getUtf8Name<T>() + "::" + oscr::getUtf8Name(port));
874 buf.byte_offset = 0;
875 buf.byte_size = bytesize;
876 buf.owned = true;
877
878 buf.handle->create();
879 }
880 else
881 {
882 buf.handle = renderer.state.rhi->newBuffer(
883 QRhiBuffer::Static, QRhiBuffer::StorageBuffer | QRhiBuffer::VertexBuffer,
884 1);
885 buf.handle->setName(oscr::getUtf8Name<T>() + "::" + oscr::getUtf8Name(port));
886 buf.byte_offset = 0;
887 buf.byte_size = 1;
888 buf.owned = true;
889
890 buf.handle->create();
891 return;
892 }
893 }
894 else if(buf.handle->size() != bytesize)
895 {
896 buf.handle->destroy();
897 buf.handle->setSize(bytesize);
898 buf.handle->create();
899 buf.byte_size = bytesize;
900 }
901
903 currentResourceUpdateBatch, buf.handle, offset, bytesize, data);
904 };
905 }
906
907 template <typename Field, std::size_t N, std::size_t NField>
908 requires avnd::gpu_buffer<std::decay_t<decltype(Field::buffer)>>
909 void createOutput(
910 score::gfx::RenderList& renderer, auto& parent, Field& port,
911 avnd::predicate_index<N> np, avnd::field_index<NField> nf)
912 {
913 auto& [gfx_port, buf] = m_buffers[N];
914 gfx_port = parent.output[nf];
915 buf.handle = reinterpret_cast<QRhiBuffer*>(port.buffer.handle);
916 buf.byte_size = port.buffer.byte_size;
917 buf.byte_offset = port.buffer.byte_offset;
918 buf.owned = false;
919 }
920
921 void init(score::gfx::RenderList& renderer, auto& state, auto& parent)
922 {
923 // Init buffers for the outputs
924 avnd::buffer_output_introspection<T>::for_all_n2(
925 avnd::get_outputs<T>(state), [&]<typename Field, std::size_t N, std::size_t NField>
926 (Field& port, avnd::predicate_index<N> np, avnd::field_index<NField> nf) {
927 SCORE_ASSERT(parent.output.size() > nf);
928 SCORE_ASSERT(parent.output[nf]->type == score::gfx::Types::Buffer);
929 using buffer_type = std::decay_t<decltype(port.buffer)>;
930
931 if constexpr(avnd::cpu_raw_buffer<buffer_type> && requires {
932 port.buffer.upload(nullptr, 0, 0);
933 })
934 {
935 createOutput(renderer, parent, port, np, nf);
936 }
937 else if constexpr(avnd::gpu_buffer<buffer_type>)
938 {
939 createOutput(renderer, parent, port, np, nf);
940 }
941 else
942 {
943 // m_buffers[N] = createOutput(renderer, *parent.output[nf], port.buffer);
944 static_assert(std::is_same_v<T, void>, "unsupported");
945 }
946 });
947 }
948
949 void prepareUpload(QRhiResourceUpdateBatch& res)
950 {
951 currentResourceUpdateBatch = &res;
952 }
953
954 void upload(score::gfx::RenderList& renderer, auto& state, QRhiResourceUpdateBatch& res)
955 {
956 avnd::buffer_output_introspection<T>::for_all_n(
957 avnd::get_outputs<T>(state), [&]<std::size_t N>(auto& t, avnd::predicate_index<N> idx) {
958 auto& [port, buf] = m_buffers[N];
959 uploadOutputBuffer(renderer, t.buffer, res, buf);
960 });
961 }
962
963 void release(score::gfx::RenderList& renderer)
964 {
965 // Free outputs
966 for(auto& [p, buf] : m_buffers)
967 {
968 if(buf.owned)
969 renderer.releaseBuffer(buf.handle);
970 buf.handle = nullptr;
971 buf.owned = false;
972 }
973 }
974};
975
976template<typename T>
977 requires (avnd::buffer_output_introspection<T>::size == 0)
978struct buffer_outputs_storage<T>
979{
980 static void init(auto&&...)
981 {
982
983 }
984
985 static void prepareUpload(auto&&...)
986 {
987 }
988
989 static void upload(auto&&...)
990 {
991 }
992
993 static void release(auto&&...)
994 {
995 }
996};
997
998
999template <typename Tex>
1000static auto
1001createOutputTexture(score::gfx::RenderList& renderer, const Tex& texture_spec, QSize size)
1002{
1003 auto& rhi = *renderer.state.rhi;
1004 QRhiTexture* texture = &renderer.emptyTexture();
1005 if(size.width() > 0 && size.height() > 0)
1006 {
1007 texture = rhi.newTexture(
1008 gpp::qrhi::textureFormat(texture_spec), size, 1, QRhiTexture::Flag{});
1009
1010 texture->create();
1011 }
1012
1013 auto sampler = rhi.newSampler(
1014 QRhiSampler::Linear, QRhiSampler::Linear, QRhiSampler::None,
1015 QRhiSampler::ClampToEdge, QRhiSampler::ClampToEdge);
1016
1017 sampler->create();
1018 return score::gfx::Sampler{sampler, texture};
1019}
1020
1021
1022template<typename T>
1023struct texture_inputs_storage;
1024
1025template<typename T>
1026 requires (avnd::texture_input_introspection<T>::size > 0)
1027struct texture_inputs_storage<T>
1028{
1029 ossia::small_flat_map<const score::gfx::Port*, score::gfx::TextureRenderTarget, 2>
1030 m_rts;
1031
1032 QRhiReadbackResult m_readbacks[avnd::texture_input_introspection<T>::size];
1033
1034 template <typename Tex>
1035 QRhiTexture* createInput(
1036 score::gfx::RenderList& renderer, score::gfx::Port* port, Tex& texture_spec,
1038 {
1039 static constexpr auto flags
1040 = QRhiTexture::RenderTarget | QRhiTexture::UsedAsTransferSource;
1041 QRhiTexture::Format fmt{};
1042 if constexpr(requires (Tex tex) { tex.format = {}; } && !requires (Tex tex) { tex.request_format; })
1043 {
1044 // Format freely assignable: we use what the user sets in the GUI
1045 fmt = spec.format;
1046 gpp::qrhi::toTextureFormat(fmt, texture_spec);
1047 }
1048 else
1049 {
1050 fmt = gpp::qrhi::textureFormat(texture_spec);
1051 }
1052
1053 QRhiTexture* texture = renderer.state.rhi->newTexture(
1054 fmt, spec.size, 1, flags);
1055
1056 SCORE_ASSERT(texture->create());
1057 m_rts[port] = score::gfx::createRenderTarget(
1058 renderer.state, texture, renderer.samples(), renderer.requiresDepth(*port));
1059 return texture;
1060 }
1061
1062 void init(auto& self, score::gfx::RenderList& renderer)
1063 {
1064 // Init input render targets
1065 avnd::texture_input_introspection<T>::for_all_n2(
1066 avnd::get_inputs<T>(*self.state),
1067 [&]<typename F, std::size_t K, std::size_t N>(F& t, avnd::predicate_index<K>, avnd::field_index<N>) {
1068 auto& parent = self.node();
1069 auto spec = parent.resolveRenderTargetSpecs(N, renderer);
1070 if constexpr(requires {
1071 t.request_width;
1072 t.request_height;
1073 })
1074 {
1075 spec.size.rwidth() = t.request_width;
1076 spec.size.rheight() = t.request_height;
1077 }
1078
1079 auto tex = createInput(renderer, parent.input[N], t.texture, spec);
1080 if constexpr(avnd::cpu_texture_port<F>)
1081 {
1082 t.texture.width = spec.size.width();
1083 t.texture.height = spec.size.height();
1084 }
1085 else if constexpr(avnd::gpu_texture_port<F>)
1086 {
1087 t.texture.handle = tex;
1088 t.texture.width = spec.size.width();
1089 t.texture.height = spec.size.height();
1090 }
1091 });
1092 }
1093
1094 bool update(auto& self,
1095 score::gfx::RenderList& renderer, QRhiResourceUpdateBatch& res)
1096 {
1097#if 0
1098 bool need_update = false;
1099 avnd::texture_input_introspection<T>::for_all_n2(
1100 avnd::get_inputs<T>(*self.state),
1101 [&]<typename F, std::size_t K, std::size_t N>(F& t, avnd::predicate_index<K>, avnd::field_index<N>) {
1102 if constexpr(requires {
1103 t.request_width;
1104 t.request_height;
1105 })
1106 {
1107 auto& parent = self.node();
1108 auto port = parent.input[N];
1109 const score::gfx::TextureRenderTarget& texture = m_rts[port];
1110 QSizeF sz{};
1111 if(texture.texture)
1112 sz = texture.texture->pixelSize();
1113 if(sz.width() != t.request_width || sz.height() != t.request_height)
1114 {
1115 // FIXME right now this doesn't work because
1116 // the render target spec is stored in the node.
1117 // Also the RenderList just recomputes everything anyways,
1118 // so we should just emit a "need to change" signal and abort as
1119 // long as things aren't more optimized and actually follow the graph
1120
1121 // m_rts[port].release();
1122
1123 // auto spec = parent.resolveRenderTargetSpecs(N, renderer);
1124 // spec.size.rwidth() = t.request_width;
1125 // spec.size.rheight() = t.request_height;
1126
1127 // createInput(renderer, port, t.texture, sz);
1128
1129 // t.texture.width = spec.size.width();
1130 // t.texture.height = spec.size.height();
1131 // need_update = true;
1132 //
1133 }
1134 }
1135 });
1136 return need_update;
1137#endif
1138 return false;
1139 }
1140
1141 void runInitialPasses(auto& self, QRhi& rhi)
1142 {
1143 // Fetch input textures (if any)
1144 // Copy the readback output inside the structure
1145 // TODO it would be much better to do this inside the readback's
1146 // "completed" callback.
1147 if constexpr(avnd::cpu_texture_input_introspection<T>::size > 0)
1148 {
1149 avnd::texture_input_introspection<T>::for_all_n(
1150 avnd::get_inputs<T>(*self.state), [&]<typename F, std::size_t K>(F& t, avnd::predicate_index<K>) {
1151 if constexpr(avnd::cpu_texture_port<F>)
1152 {
1153 oscr::loadInputTexture(rhi, m_readbacks, t.texture, K);
1154 }
1155 });
1156 }
1157 }
1158
1159 void release()
1160 {
1161 // Free inputs
1162 // TODO investigate why reference does not work here:
1163 for(auto [port, rt] : m_rts)
1164 rt.release();
1165 m_rts.clear();
1166 }
1167
1168 void inputAboutToFinish(auto& parent, const score::gfx::Port& p, QRhiResourceUpdateBatch*& res)
1169 {
1170 if constexpr(avnd::cpu_texture_input_introspection<T>::size > 0)
1171 {
1172 const auto& inputs = parent.input;
1173 auto index_of_port = ossia::find(inputs, &p) - inputs.begin();
1174 {
1175 auto tex = m_rts[&p].texture;
1176 auto& readback = m_readbacks[index_of_port];
1177 readback = {};
1178 res->readBackTexture(QRhiReadbackDescription{tex}, &readback);
1179 }
1180 }
1181 }
1182
1183};
1184template<typename T>
1185 requires (avnd::texture_input_introspection<T>::size == 0)
1186struct texture_inputs_storage<T>
1187{
1188 static void init(auto&&...) { }
1189 static void runInitialPasses(auto&&...) { }
1190 static void release(auto&&...) { }
1191 static void inputAboutToFinish(auto&&...) { }
1192};
1193
1194
1195
1196template <avnd::cpu_texture Tex>
1197static QRhiTexture* updateTexture(auto& self, score::gfx::RenderList& renderer, int k, const Tex& cpu_tex)
1198{
1199 auto& [sampler, texture] = self.m_samplers[k];
1200 if(texture)
1201 {
1202 auto sz = texture->pixelSize();
1203 if(cpu_tex.width == sz.width() && cpu_tex.height == sz.height())
1204 return texture;
1205 }
1206
1207 // Check the texture size
1208 if(cpu_tex.width > 0 && cpu_tex.height > 0)
1209 {
1210 QRhiTexture* oldtex = texture;
1211 QRhiTexture* newtex = renderer.state.rhi->newTexture(
1212 gpp::qrhi::textureFormat(cpu_tex), QSize{cpu_tex.width, cpu_tex.height}, 1,
1213 QRhiTexture::Flag{});
1214 newtex->create();
1215 for(auto& [edge, pass] : self.m_p)
1216 if(pass.srb)
1217 score::gfx::replaceTexture(*pass.srb, sampler, newtex);
1218 texture = newtex;
1219
1220 if(oldtex && oldtex != &renderer.emptyTexture())
1221 {
1222 oldtex->deleteLater();
1223 }
1224
1225 return newtex;
1226 }
1227 else
1228 {
1229 for(auto& [edge, pass] : self.m_p)
1230 if(pass.srb)
1231 score::gfx::replaceTexture(*pass.srb, sampler, &renderer.emptyTexture());
1232
1233 return &renderer.emptyTexture();
1234 }
1235}
1236
1237template <avnd::cpu_texture Tex>
1238static void uploadOutputTexture(auto& self,
1239 score::gfx::RenderList& renderer, int k, Tex& cpu_tex,
1240 QRhiResourceUpdateBatch* res)
1241{
1242 if(cpu_tex.changed)
1243 {
1244 if(auto texture = updateTexture(self, renderer, k, cpu_tex))
1245 {
1246 QByteArray buf
1247 = QByteArray::fromRawData((const char*)cpu_tex.bytes, cpu_tex.bytesize());
1248 if constexpr(requires { Tex::RGB; })
1249 {
1250 // RGB -> RGBA
1251 // FIXME other conversions
1252 const QByteArray rgb = buf;
1253 QByteArray rgba;
1254 rgba.resize(cpu_tex.width * cpu_tex.height * 4);
1255 auto src = (const unsigned char*)rgb.constData();
1256 auto dst = (unsigned char*)rgba.data();
1257 for(int rgb_byte = 0, rgba_byte = 0, N = rgb.size(); rgb_byte < N;)
1258 {
1259 dst[rgba_byte + 0] = src[rgb_byte + 0];
1260 dst[rgba_byte + 1] = src[rgb_byte + 1];
1261 dst[rgba_byte + 2] = src[rgb_byte + 2];
1262 dst[rgba_byte + 3] = 255;
1263 rgb_byte += 3;
1264 rgba_byte += 4;
1265 }
1266 buf = rgba;
1267 }
1268
1269 // Upload it (mirroring is done in shader generic_texgen_fs if necessary)
1270 {
1271 QRhiTextureSubresourceUploadDescription sd(buf);
1272 QRhiTextureUploadDescription desc{QRhiTextureUploadEntry{0, 0, sd}};
1273
1274 res->uploadTexture(texture, desc);
1275 }
1276
1277 cpu_tex.changed = false;
1278 }
1279 }
1280}
1281
// Default vertex shader used to blit a CPU-generated texture to the output:
// passes positions through the renderer's clip-space correction matrix and
// flips the Y texture coordinate on backends whose framebuffer origin
// requires it (SPIR-V / GLSL targets).
static const constexpr auto generic_texgen_vs = R"_(#version 450
layout(location = 0) in vec2 position;
layout(location = 1) in vec2 texcoord;

layout(binding=3) uniform sampler2D y_tex;
layout(location = 0) out vec2 v_texcoord;

layout(std140, binding = 0) uniform renderer_t {
  mat4 clipSpaceCorrMatrix;
  vec2 renderSize;
} renderer;

out gl_PerVertex { vec4 gl_Position; };

void main()
{
#if defined(QSHADER_SPIRV) || defined(QSHADER_GLSL)
  v_texcoord = vec2(texcoord.x, 1. - texcoord.y);
#else
  v_texcoord = texcoord;
#endif
  gl_Position = renderer.clipSpaceCorrMatrix * vec4(position.xy, 0.0, 1.);
}
)_";
1306
// Default fragment shader paired with generic_texgen_vs: samples the single
// input texture (binding 3) at the interpolated texture coordinate.
static const constexpr auto generic_texgen_fs = R"_(#version 450
layout(location = 0) in vec2 v_texcoord;
layout(location = 0) out vec4 fragColor;

layout(std140, binding = 0) uniform renderer_t {
mat4 clipSpaceCorrMatrix;
vec2 renderSize;
} renderer;

layout(binding=3) uniform sampler2D y_tex;

void main ()
{
  fragColor = texture(y_tex, v_texcoord);
}
)_";
1323
// Storage and lifecycle hooks for the texture *outputs* of a processor T;
// specialized below depending on whether T actually has texture outputs.
template<typename T>
struct texture_outputs_storage;
1326
// If we have texture outs we need the whole rendering infrastructure
template<typename T>
  requires (avnd::texture_output_introspection<T>::size > 0)
struct texture_outputs_storage<T>
{
  // Set up mesh, UBO, the generic blit shaders, one sampler + texture per
  // CPU texture output, and the default render passes — in that order.
  void init(auto& self, score::gfx::RenderList& renderer, QRhiResourceUpdateBatch& res)
  {
    const auto& mesh = renderer.defaultTriangle();
    self.defaultMeshInit(renderer, mesh, res);
    self.processUBOInit(renderer);
    // Not needed here as we do not have a GPU pass:
    // this->m_material.init(renderer, this->node.input, this->m_samplers);

    std::tie(self.m_vertexS, self.m_fragmentS)
        = score::gfx::makeShaders(renderer.state, generic_texgen_vs, generic_texgen_fs);

    avnd::cpu_texture_output_introspection<T>::for_all(
        avnd::get_outputs<T>(*self.state), [&](auto& t) {
      self.m_samplers.push_back(
          createOutputTexture(renderer, t.texture, QSize{t.texture.width, t.texture.height}));
    });

    self.defaultPassesInit(renderer, mesh);
  }

  // Upload every changed CPU texture output; the sampler index N matches the
  // push order used in init() above.
  void runInitialPasses(auto& self,
      score::gfx::RenderList& renderer,
      QRhiResourceUpdateBatch*& res)
  {
    avnd::cpu_texture_output_introspection<T>::for_all_n(
        avnd::get_outputs<T>(*self.state), [&]<std::size_t N>(auto& t, avnd::predicate_index<N>) {
      uploadOutputTexture(self, renderer, N, t.texture, res);
    });
  }

  // Free the output textures; the renderer's shared empty texture is never
  // deleted. NOTE(review): the samplers themselves are not deleted here —
  // presumably released elsewhere; confirm.
  void release(auto& self, score::gfx::RenderList& r)
  {
    // Free outputs
    for(auto& [sampl, texture] : self.m_samplers)
    {
      if(texture != &r.emptyTexture())
        texture->deleteLater();
      texture = nullptr;
    }
  }

};
1374
1375template<typename T>
1376 requires (avnd::texture_output_introspection<T>::size == 0)
1377struct texture_outputs_storage<T>
1378{
1379 static void init(auto& self, score::gfx::RenderList& renderer, QRhiResourceUpdateBatch& res)
1380 {
1381 }
1382
1383 static void runInitialPasses(auto& self,
1384 score::gfx::RenderList& renderer,
1385 QRhiResourceUpdateBatch*& res)
1386 {
1387 }
1388
1389 static void release(auto& self, score::gfx::RenderList& r)
1390 {
1391 }
1392};
// Storage and upload logic for the geometry outputs of a processor T;
// specialized below depending on whether T actually has geometry outputs.
template<typename T>
struct geometry_outputs_storage;
1395
1396template<typename T>
1397 requires (avnd::geometry_output_introspection<T>::size > 0)
1398struct geometry_outputs_storage<T>
1399{
1400 ossia::geometry_spec specs[avnd::geometry_output_introspection<T>::size];
1401
1402 template <avnd::geometry_port Field>
1403 void reload_mesh(Field& ctrl, ossia::geometry_spec& spc)
1404 {
1405 spc.meshes = std::make_shared<ossia::mesh_list>();
1406 auto& ossia_meshes = *spc.meshes;
1407 if constexpr(avnd::static_geometry_type<Field> || avnd::dynamic_geometry_type<Field>)
1408 {
1409 ossia_meshes.meshes.resize(1);
1410 load_geometry(ctrl, ossia_meshes.meshes[0]);
1411 }
1412 else if constexpr(
1413 avnd::static_geometry_type<decltype(Field::mesh)>
1414 || avnd::dynamic_geometry_type<decltype(Field::mesh)>)
1415 {
1416 ossia_meshes.meshes.resize(1);
1417 load_geometry(ctrl.mesh, ossia_meshes.meshes[0]);
1418 }
1419 else
1420 {
1421 load_geometry(ctrl, ossia_meshes);
1422 }
1423 }
1424
1425 template <avnd::geometry_port Field, std::size_t N>
1426 void upload(
1427 score::gfx::RenderList& renderer, Field& ctrl, score::gfx::Edge& edge,
1428 avnd::predicate_index<N>)
1429 {
1430 auto edge_sink = edge.sink;
1431 if(auto pnode = edge_sink->node)
1432 {
1433 ossia::geometry_spec& spc = specs[N];
1434
1435 // 1. Reload mesh
1436 {
1437 if(ctrl.dirty_mesh)
1438 {
1439 reload_mesh(ctrl, spc);
1440 }
1441 else
1442 {
1443 if(spc.meshes)
1444 {
1445 auto& ossia_meshes = *spc.meshes;
1446
1447 bool any_need_reload = false;
1448 bool any_need_upload = false;
1449 if constexpr(avnd::static_geometry_type<Field> || avnd::dynamic_geometry_type<Field>)
1450 {
1451 SCORE_ASSERT(ossia_meshes.meshes.size() == 1);
1452 auto [need_reload, need_upload]
1453 = update_geometry(ctrl, ossia_meshes.meshes[0]);
1454 any_need_reload = need_reload;
1455 any_need_upload = need_upload;
1456 }
1457 else if constexpr(
1458 avnd::static_geometry_type<decltype(Field::mesh)>
1459 || avnd::dynamic_geometry_type<decltype(Field::mesh)>)
1460 {
1461 SCORE_ASSERT(ossia_meshes.meshes.size() == 1);
1462 auto [need_reload, need_upload]
1463 = update_geometry(ctrl.mesh, ossia_meshes.meshes[0]);
1464 any_need_reload = need_reload;
1465 any_need_upload = need_upload;
1466 }
1467 else
1468 {
1469 auto [need_reload, need_upload] = update_geometry(ctrl, ossia_meshes);
1470 any_need_reload = need_reload;
1471 any_need_upload = need_upload;
1472 }
1473
1474 if(any_need_reload)
1475 {
1476 reload_mesh(ctrl, spc);
1477 }
1478 }
1479 }
1480 ctrl.dirty_mesh = false;
1481 }
1482
1483 // 2. Push to next node
1484 // FIXME this should be for the renderer of edge, not the node, since
1485 // geometries can have gpu buffers
1486 auto rendered_node = pnode->renderedNodes.find(&renderer);
1487 SCORE_ASSERT(rendered_node != pnode->renderedNodes.end());
1488
1489 auto it = std::find(
1490 edge_sink->node->input.begin(), edge_sink->node->input.end(), edge_sink);
1491 SCORE_ASSERT(it != edge_sink->node->input.end());
1492 int n = it - edge_sink->node->input.begin();
1493
1494 rendered_node->second->process(n, spc);
1495
1496 // 3. Same for transform3d
1497
1498 if constexpr(requires { ctrl.transform; })
1499 {
1500 if(ctrl.dirty_transform)
1501 {
1502 ossia::transform3d transform;
1503 std::copy_n(ctrl.transform, std::ssize(ctrl.transform), transform.matrix);
1504 ctrl.dirty_transform = false;
1505
1506 rendered_node->second->process(n, transform);
1507 if(auto pnode = dynamic_cast<score::gfx::ProcessNode*>(edge_sink->node))
1508 pnode->process(n, transform);
1509 }
1510 }
1511 }
1512 }
1513
1514 void upload(score::gfx::RenderList& renderer, auto& state, score::gfx::Edge& edge)
1515 {
1516 // FIXME we need something such as port_run_{pre,post}process for GPU nodes
1517 avnd::geometry_output_introspection<T>::for_all_n(
1518 avnd::get_outputs(state),
1519 [&](auto& field, auto pred) { this->upload(renderer, field, edge, pred); });
1520 }
1521};
1522
1523
1524template<typename T>
1525 requires (avnd::geometry_output_introspection<T>::size == 0)
1526struct geometry_outputs_storage<T>
1527{
1528 static void upload(auto&&...)
1529 {
1530
1531 }
1532};
1533}
1534
1535#endif
Root data model for visual nodes.
Definition score-plugin-gfx/Gfx/Graph/Node.hpp:74
std::vector< Port * > input
Input ports of that node.
Definition score-plugin-gfx/Gfx/Graph/Node.hpp:103
virtual void process(Message &&msg)
Process a message from the execution engine.
Definition Node.cpp:25
ossia::small_pod_vector< Port *, 1 > output
Output ports of that node.
Definition score-plugin-gfx/Gfx/Graph/Node.hpp:109
Common base class for most single-pass, simple nodes.
Definition score-plugin-gfx/Gfx/Graph/Node.hpp:203
Renderer for a given node.
Definition NodeRenderer.hpp:11
Base class for sink nodes (QWindow, spout, syphon, NDI output, ...)
Definition OutputNode.hpp:31
Common base class for nodes that map to score processes.
Definition score-plugin-gfx/Gfx/Graph/Node.hpp:176
List of nodes to be rendered to an output.
Definition RenderList.hpp:19
bool requiresDepth(score::gfx::Port &p) const noexcept
Whether this list of rendering actions requires depth testing at all.
Definition RenderList.cpp:444
const score::gfx::Mesh & defaultTriangle() const noexcept
A triangle mesh correct for this API.
Definition RenderList.cpp:483
RenderState & state
RenderState corresponding to this RenderList.
Definition RenderList.hpp:102
QRhiTexture & emptyTexture() const noexcept
Texture to use when a texture is missing.
Definition RenderList.hpp:125
TreeNode< DeviceExplorerNode > Node
Definition DeviceNode.hpp:74
Definition Factories.hpp:19
TextureRenderTarget createRenderTarget(const RenderState &state, QRhiTexture *tex, int samples, bool depth, bool samplableDepth)
Create a render target from a texture.
Definition score-plugin-gfx/Gfx/Graph/Utils.cpp:11
void uploadStaticBufferWithStoredData(QRhiResourceUpdateBatch *ub, QRhiBuffer *buf, int offset, int64_t bytesize, const char *data)
Schedule a Static buffer update when we can guarantee the buffer outlives the frame.
Definition score-plugin-gfx/Gfx/Graph/Utils.hpp:406
std::pair< QShader, QShader > makeShaders(const RenderState &v, QString vert, QString frag)
Get a pair of compiled vertex / fragment shaders from GLSL 4.5 sources.
Definition score-plugin-gfx/Gfx/Graph/Utils.cpp:647
Base toolkit upon which the software is built.
Definition Application.cpp:113
STL namespace.
Definition DocumentContext.hpp:18
Definition Mesh.hpp:15
Connection between two score::gfx::Port.
Definition score-plugin-gfx/Gfx/Graph/Utils.hpp:75
Definition score-plugin-gfx/Gfx/Graph/Node.hpp:49
Definition OutputNode.hpp:11
Port of a score::gfx::Node.
Definition score-plugin-gfx/Gfx/Graph/Utils.hpp:54
Definition score-plugin-gfx/Gfx/Graph/Node.hpp:56
Stores a sampler and the texture currently associated with it.
Definition score-plugin-gfx/Gfx/Graph/Utils.hpp:27
Useful abstraction for storing all the data related to a render target.
Definition score-plugin-gfx/Gfx/Graph/Utils.hpp:122