/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrSurface.h"
#include "src/gpu/GrSurfaceProxy.h"

#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTDynamicHash.h"
#include "src/core/SkTMultiMap.h"

class GrResourceProvider;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0
/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval so it can detect when a proxy is reused.) When it comes time to allocate the
 * resources it traverses the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *         to the free pool)
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (which is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
 *
 * The allocator will churn through this list but could fail anywhere.
 *
 * Allocation failure handling occurs at two levels:
 *
 * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
 *
 * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
 *    (via GrOpsTask::purgeOpsWithUninstantiatedProxies).
 *
 * The pass to determine which ops to drop is a bit laborious, so we only check the opsTasks
 * and individual ops when something goes wrong in allocation (i.e., when
 * GrResourceAllocator::assign reports an error).
 *
 * Taken together, this means we should never attempt to draw an op which is missing a
 * required GrSurface.
 *
 * One wrinkle in this plan is that promise images are fulfilled during the gather interval pass.
 * If any of the promise images fail at this stage then the allocator is set into an error
 * state and all allocations are then scanned for failures during the main allocation pass.
 */
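
// A minimal sketch of how a caller might drive the allocator at flush time. It is
// illustrative only: 'opsTasks' and 'numOpsTasks' are hypothetical placeholders, not
// actual Skia flush code.
//
//     GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, numOpsTasks));
//     for (int i = 0; i < numOpsTasks; ++i) {
//         opsTasks[i]->gatherProxyIntervals(&alloc);  // calls addInterval()/incOps()
//         alloc.markEndOfOpsTask(i);
//     }
//     alloc.determineRecyclability();
//
//     int startIndex, stopIndex;
//     GrResourceAllocator::AssignError error;
//     while (alloc.assign(&startIndex, &stopIndex, &error)) {
//         if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
//             // Drop the opsTasks/ops in [startIndex, stopIndex) that reference
//             // uninstantiated proxies (see the failure-handling notes above).
//         }
//         // ... execute the opsTasks in [startIndex, stopIndex) ...
//     }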
class GrResourceAllocator {
public:
    GrResourceAllocator(GrResourceProvider* resourceProvider SkDEBUGCODE(, int numOpsTasks))
            : fResourceProvider(resourceProvider) SkDEBUGCODE(, fNumOpsTasks(numOpsTasks)) {}

    ~GrResourceAllocator();

    unsigned int curOp() const { return fNumOps; }
    void incOps() { fNumOps++; }
    /** Indicates whether a given call to addInterval represents an actual usage of the
     *  provided proxy. This is mainly here to accommodate deferred proxies attached to
     *  opsTasks. In that case we need to create an extra long interval for them (due to
     *  the upload) but don't want to count that usage/reference towards the proxy's
     *  recyclability.
     */
    enum class ActualUse : bool {
        kNo  = false,
        kYes = true
    };
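
    // For example (a hypothetical call site, with made-up indices): a deferred proxy
    // attached to an opsTask, which must stay alive from the start of the flush through
    // its upload, might be registered as
    //     alloc->addInterval(deferredProxy, 0, alloc->curOp(), ActualUse::kNo);
    // so the extra long interval does not count as a real use for recyclability purposes.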

    // Add a usage interval from 'start' to 'end' inclusive. This is usually used for
    // renderTargets. If an interval already exists for the proxy, it will be expanded to
    // include the new range.
    void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse
                     SkDEBUGCODE(, bool isDirectDstRead = false));

    enum class AssignError {
        kNoError,
        kFailedProxyInstantiation
    };

    // Assigns GrSurfaces to the proxies of a portion of the queued opsTasks, in order to
    // reduce the total amount of GPU resources required at any one time. Returns true when
    // the opsTasks from 'startIndex' to 'stopIndex' should be executed; false when nothing
    // remains to be executed. If any proxy fails to instantiate, '*outError' is set to
    // kFailedProxyInstantiation; in that case the caller should remove all ops which
    // reference an uninstantiated proxy.
    bool assign(int* startIndex, int* stopIndex, AssignError* outError);

    void determineRecyclability();
    void markEndOfOpsTask(int opsTaskIndex);

#if GR_ALLOCATION_SPEW
    void dumpIntervals();
#endif

private:
    class Interval;

    // Remove dead intervals from the active list
    void expire(unsigned int curIndex);

    bool onOpsTaskBoundary() const;
    void forceIntermediateFlush(int* stopIndex);

    // These two methods wrap the interactions with the free pool
    void recycleSurface(sk_sp<GrSurface> surface);
    sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy);

    struct FreePoolTraits {
        static const GrScratchKey& GetKey(const GrSurface& s) {
            return s.resourcePriv().getScratchKey();
        }

        static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
        static void OnFree(GrSurface* s) { s->unref(); }
    };
    typedef SkTMultiMap<GrSurface, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
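
    // Note: because the free pool is keyed by scratch key, a recycled GrSurface can back
    // any proxy whose description maps to the same key, not just the proxy that
    // originally used it.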

    typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;

    class Interval {
    public:
        Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
                : fProxy(proxy)
                , fProxyID(proxy->uniqueID().asUInt())
                , fStart(start)
                , fEnd(end)
                , fNext(nullptr) {
            SkASSERT(proxy);
#if GR_TRACK_INTERVAL_CREATION
            fUniqueID = CreateUniqueID();
            SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
                     fUniqueID, proxy->uniqueID().asUInt(), start, end);
#endif
        }

        // Used when recycling an interval
        void resetTo(GrSurfaceProxy* proxy, unsigned int start, unsigned int end) {
            SkASSERT(proxy);
            SkASSERT(!fProxy && !fNext);

            fUses = 0;
            fProxy = proxy;
            fProxyID = proxy->uniqueID().asUInt();
            fStart = start;
            fEnd = end;
            fNext = nullptr;
#if GR_TRACK_INTERVAL_CREATION
            fUniqueID = CreateUniqueID();
            SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
                     fUniqueID, proxy->uniqueID().asUInt(), start, end);
#endif
        }

        ~Interval() {
            SkASSERT(!fAssignedSurface);
        }

        const GrSurfaceProxy* proxy() const { return fProxy; }
        GrSurfaceProxy* proxy() { return fProxy; }

        unsigned int start() const { return fStart; }
        unsigned int end() const { return fEnd; }

        void setNext(Interval* next) { fNext = next; }
        const Interval* next() const { return fNext; }
        Interval* next() { return fNext; }

        void markAsRecyclable() { fIsRecyclable = true; }
        bool isRecyclable() const { return fIsRecyclable; }

        void addUse() { fUses++; }
        int uses() const { return fUses; }
        void extendEnd(unsigned int newEnd) {
            if (newEnd > fEnd) {
#if GR_TRACK_INTERVAL_CREATION
                // Log before updating fEnd so the old value is actually printed.
                SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
#endif
                fEnd = newEnd;
            }
        }

        void assign(sk_sp<GrSurface>);
        bool wasAssignedSurface() const { return fAssignedSurface != nullptr; }
        sk_sp<GrSurface> detachSurface() { return std::move(fAssignedSurface); }

        // for SkTDynamicHash
        static const uint32_t& GetKey(const Interval& intvl) {
            return intvl.fProxyID;
        }
        static uint32_t Hash(const uint32_t& key) { return key; }

    private:
        sk_sp<GrSurface> fAssignedSurface;
        GrSurfaceProxy*  fProxy;
        uint32_t         fProxyID; // This is here because DynamicHash requires a ref to the key
        unsigned int     fStart;
        unsigned int     fEnd;
        Interval*        fNext;
        unsigned int     fUses = 0;
        bool             fIsRecyclable = false;

#if GR_TRACK_INTERVAL_CREATION
        uint32_t fUniqueID;

        uint32_t CreateUniqueID();
#endif
    };

    class IntervalList {
    public:
        IntervalList() = default;
        ~IntervalList() {
            // The only time we delete an IntervalList is in the GrResourceAllocator dtor.
            // Since the arena allocator will clean up for us we don't bother here.
        }

        bool empty() const {
            SkASSERT(SkToBool(fHead) == SkToBool(fTail));
            return !SkToBool(fHead);
        }
        const Interval* peekHead() const { return fHead; }
        Interval* peekHead() { return fHead; }
        Interval* popHead();
        void insertByIncreasingStart(Interval*);
        void insertByIncreasingEnd(Interval*);
        Interval* detachAll();

    private:
        SkDEBUGCODE(void validate() const;)

        Interval* fHead = nullptr;
        Interval* fTail = nullptr;
    };

    // Compositing use cases can create > 80 intervals.
    static const int kInitialArenaSize = 128 * sizeof(Interval);

    GrResourceProvider*    fResourceProvider;
    FreePoolMultiMap       fFreePool;          // Recently created/used GrSurfaces
    IntvlHash              fIntvlHash;         // All the intervals, hashed by proxyID

    IntervalList           fIntvlList;         // All the intervals sorted by increasing start
    IntervalList           fActiveIntvls;      // List of live intervals during assignment
                                               // (sorted by increasing end)
    unsigned int           fNumOps = 0;
    SkTArray<unsigned int> fEndOfOpsTaskOpIndices;
    int                    fCurOpsTaskIndex = 0;
    SkDEBUGCODE(const int  fNumOpsTasks = -1;)

    SkDEBUGCODE(bool fAssigned = false;)

    char         fStorage[kInitialArenaSize];
    SkArenaAlloc fIntervalAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
    Interval*    fFreeIntervalList = nullptr;
    bool         fLazyInstantiationError = false;
};

#endif // GrResourceAllocator_DEFINED
| 278 | |