#include "detail/aglGPUMemBlockMgr.h"
| 2 | |
namespace agl::detail {
// Sentinel pool-type values: cInvalidPoolType wraps 0, cValidPoolType wraps
// the project-defined VALID_POOL_TYPE_VALUE magic constant.
const MemoryPoolType MemoryPoolType::cInvalidPoolType(0);
const MemoryPoolType MemoryPoolType::cValidPoolType(VALID_POOL_TYPE_VALUE);
| 6 | |
// Expands to the sead singleton instance/disposer boilerplate for GPUMemBlockMgr.
SEAD_SINGLETON_DISPOSER_IMPL(GPUMemBlockMgr)
| 8 | |
| 9 | GPUMemBlockMgr::GPUMemBlockMgr() { |
| 10 | mMinBlockSize = cGPUPhysicalMemorySizeAlignment; |
| 11 | mFlags = GPUMemBlockMgrFlags::EnablePoolSharing; |
| 12 | } |
| 13 | |
GPUMemBlockMgr::~GPUMemBlockMgr() {
    // Release the manager-heap tracking buffer allocated in initialize().
    mMngrHeaps.freeBuffer();
}
| 17 | |
| 18 | void GPUMemBlockMgr::initialize(sead::Heap* heap1, sead::Heap* heap2) { |
| 19 | mMngrHeaps.allocBuffer(ptrNumMax: 0x1000, heap: heap1); |
| 20 | mMngrHeaps.clear(); |
| 21 | } |
| 22 | |
| 23 | void GPUMemBlockMgr::enableSharedMemoryPool(bool enabled) { |
| 24 | mFlags.change(val: GPUMemBlockMgrFlags::EnablePoolSharing, on: enabled); |
| 25 | } |
| 26 | |
| 27 | u64 GPUMemBlockMgr::calcGPUMemorySize(u64 userSize) { |
| 28 | return sead::MathSizeT::roundUp(x: userSize, multNumber: cGPUPhysicalMemorySizeAlignment); |
| 29 | } |
| 30 | |
| 31 | s32 GPUMemBlockMgr::calcGPUMemoryAlignment(s32 userAlignment) { |
| 32 | return sead::Mathi::roundUpPow2(val: sead::Mathi::abs(x: userAlignment), |
| 33 | base: cGPUPhysicalMemorySizeAlignment) * |
| 34 | sead::Mathi::sign(value: userAlignment); |
| 35 | } |
| 36 | |
// Looks up the GPUMemBlockMgrHeapEx wrapper registered for p_heap.
// Currently an unfinished stub: it always returns nullptr and never writes
// through p_outIndex.
GPUMemBlockMgrHeapEx* GPUMemBlockMgr::findGPUMemBlockMgrHeapEx_(sead::Heap* p_heap,
                                                                int* p_outIndex) {
    SEAD_ASSERT(p_heap != nullptr);

    // Early-out: nothing registered yet.
    if (mMngrHeaps.isEmpty()) {
        return nullptr;
    }

    // TODO: search mMngrHeaps for the entry matching p_heap and report its
    // position via p_outIndex; unimplemented, so every lookup misses.
    return nullptr;
}
| 48 | |
// Per-heap extension record. p_heap is not used by the visible body —
// presumably stored by a base-class or member initializer outside this view;
// TODO confirm against the header. Sharing defaults to enabled and the two
// unidentified pointer members (m08, m10) start null.
GPUMemBlockMgrHeapEx::GPUMemBlockMgrHeapEx(sead::Heap* p_heap) {
    mAllowSharing = 1;
    m08 = nullptr;
    m10 = nullptr;
}
| 54 | |
}  // namespace agl::detail
| 56 |