/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawOpAtlas.h"

#include "include/gpu/GrContext.h"
#include "src/core/SkOpts.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrResourceProviderPriv.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTracing.h"

#ifdef DUMP_ATLAS_DATA
static bool gDumpAtlasData = false;
#endif
std::array<uint16_t, 4> GrDrawOpAtlas::AtlasLocator::getUVs(int padding) const {

    uint16_t left = fRect.fLeft + padding;
    uint16_t top = fRect.fTop + padding;
    uint16_t right = fRect.fRight - padding;
    uint16_t bottom = fRect.fBottom - padding;

    // We pack the 2-bit page index into the low bits of the u and v texture coords
    uint32_t pageIndex = this->pageIndex();
    std::tie(left, bottom) = GrDrawOpAtlas::PackIndexInTexCoords(left, bottom, pageIndex);
    std::tie(right, top) = GrDrawOpAtlas::PackIndexInTexCoords(right, top, pageIndex);
    return { left, top, right, bottom };
}

#ifdef SK_DEBUG
void GrDrawOpAtlas::AtlasLocator::validate(const GrDrawOpAtlas* drawOpAtlas) const {
    // Verify that the plotIndex stored in the PlotLocator is consistent with the glyph rectangle
    int numPlotsX = drawOpAtlas->fTextureWidth / drawOpAtlas->fPlotWidth;
    int numPlotsY = drawOpAtlas->fTextureHeight / drawOpAtlas->fPlotHeight;

    int plotIndex = this->plotIndex();
    int plotX = fRect.fLeft / drawOpAtlas->fPlotWidth;
    int plotY = fRect.fTop / drawOpAtlas->fPlotHeight;
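    // Plots are allocated from the bottom-right corner of the page towards the top-left
    // (see createPages), so both axes are inverted when recovering the plot index from the
    // rectangle's position.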
    SkASSERT(plotIndex == (numPlotsY - plotY - 1) * numPlotsX + (numPlotsX - plotX - 1));
}
#endif

// When proxy allocation is deferred until flush time the proxies acting as atlases require
// special handling. This is because the usage that can be determined from the ops themselves
// isn't sufficient. Independent of the ops there will be ASAP and inline uploads to the
// atlases. Extending the usage interval of any op that uses an atlas to the start of the
// flush (as is done for proxies that are used for sw-generated masks) also won't work because
// the atlas persists even beyond its last use in an op within a given flush. Given this, atlases
// must explicitly manage the lifetime of their backing proxies via the onFlushCallback system
// (which calls this method).
void GrDrawOpAtlas::instantiate(GrOnFlushResourceProvider* onFlushResourceProvider) {
    for (uint32_t i = 0; i < fNumActivePages; ++i) {
        // All the atlas pages are now instantiated at flush time in the activateNewPage method.
        SkASSERT(fViews[i].proxy() && fViews[i].proxy()->isInstantiated());
    }
}

std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrProxyProvider* proxyProvider,
                                                   const GrBackendFormat& format,
                                                   GrColorType colorType, int width,
                                                   int height, int plotWidth, int plotHeight,
                                                   GenerationCounter* generationCounter,
                                                   AllowMultitexturing allowMultitexturing,
                                                   EvictionCallback* evictor) {
    if (!format.isValid()) {
        return nullptr;
    }

    std::unique_ptr<GrDrawOpAtlas> atlas(new GrDrawOpAtlas(proxyProvider, format, colorType,
                                                           width, height, plotWidth, plotHeight,
                                                           generationCounter,
                                                           allowMultitexturing));
    if (!atlas->getViews()[0].proxy()) {
        return nullptr;
    }

    if (evictor != nullptr) {
        atlas->fEvictionCallbacks.emplace_back(evictor);
    }
    return atlas;
}

// The two bits that make up the texture index are packed into the low bits of the u and v
// coordinates, respectively.
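// For example, pageIndex 2 (binary 10) sets the low bit of u to 1 and the low bit of v to 0;
// UnpackIndexFromTexCoords reverses this by reading the low bits back out and shifting them away.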
std::pair<uint16_t, uint16_t> GrDrawOpAtlas::PackIndexInTexCoords(uint16_t u, uint16_t v,
                                                                  int pageIndex) {
    SkASSERT(pageIndex >= 0 && pageIndex < 4);
    uint16_t uBit = (pageIndex >> 1u) & 0x1u;
    uint16_t vBit = pageIndex & 0x1u;
    u <<= 1u;
    u |= uBit;
    v <<= 1u;
    v |= vBit;
    return std::make_pair(u, v);
}

std::tuple<uint16_t, uint16_t, int> GrDrawOpAtlas::UnpackIndexFromTexCoords(uint16_t u,
                                                                            uint16_t v) {
    int pageIndex = 0;
    if (u & 0x1) {
        pageIndex |= 0x2;
    }
    if (v & 0x1) {
        pageIndex |= 0x1;
    }
    return std::make_tuple(u >> 1, v >> 1, pageIndex);
}

////////////////////////////////////////////////////////////////////////////////
GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, GenerationCounter* generationCounter,
                          int offX, int offY, int width, int height, GrColorType colorType)
        : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
        , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fPageIndex(pageIndex)
        , fPlotIndex(plotIndex)
        , fGenerationCounter(generationCounter)
        , fGenID(fGenerationCounter->next())
        , fPlotLocator(CreatePlotLocator(fPageIndex, fPlotIndex, fGenID))
        , fData(nullptr)
        , fWidth(width)
        , fHeight(height)
        , fX(offX)
        , fY(offY)
        , fRectanizer(width, height)
        , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
        , fColorType(colorType)
        , fBytesPerPixel(GrColorTypeBytesPerPixel(colorType))
#ifdef SK_DEBUG
        , fDirty(false)
#endif
{
    // We expect the plot's row bytes to be a multiple of 4
    SkASSERT(((width*fBytesPerPixel) & 0x3) == 0);
    // The padding for faster uploads only works for 1, 2 and 4 byte texels
    SkASSERT(fBytesPerPixel != 3 && fBytesPerPixel <= 4);
    fDirtyRect.setEmpty();
}

GrDrawOpAtlas::Plot::~Plot() {
    sk_free(fData);
}

bool GrDrawOpAtlas::Plot::addSubImage(int width, int height, const void* image, GrIRect16* rect) {
    SkASSERT(width <= fWidth && height <= fHeight);

    SkIPoint16 loc;
    if (!fRectanizer.addRect(width, height, &loc)) {
        return false;
    }

    *rect = GrIRect16::MakeXYWH(loc.fX, loc.fY, width, height);

    if (!fData) {
        fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
                                                                 fHeight));
    }
    size_t rowBytes = width * fBytesPerPixel;
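    // rowBytes is the tight stride of the incoming sub-image; the plot's backing store is
    // fWidth texels wide, so the destination pointer advances by fBytesPerPixel * fWidth per row.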
    const unsigned char* imagePtr = (const unsigned char*)image;
    // point ourselves at the right starting spot
    unsigned char* dataPtr = fData;
    dataPtr += fBytesPerPixel * fWidth * rect->fTop;
    dataPtr += fBytesPerPixel * rect->fLeft;
    // copy into the data buffer, swizzling as we go if this is ARGB data
    if (4 == fBytesPerPixel && kN32_SkColorType == kBGRA_8888_SkColorType) {
        for (int i = 0; i < height; ++i) {
            SkOpts::RGBA_to_BGRA((uint32_t*)dataPtr, (const uint32_t*)imagePtr, width);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    } else {
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, rowBytes);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    }

    fDirtyRect.join({rect->fLeft, rect->fTop, rect->fRight, rect->fBottom});

    rect->offset(fOffset.fX, fOffset.fY);
    SkDEBUGCODE(fDirty = true;)

    return true;
}

void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
                                          GrTextureProxy* proxy) {
    // We should only be issuing uploads if we are in fact dirty
    SkASSERT(fDirty && fData && proxy && proxy->peekTexture());
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    size_t rowBytes = fBytesPerPixel * fWidth;
    const unsigned char* dataPtr = fData;
    // Clamp to 4-byte aligned boundaries
    unsigned int clearBits = 0x3 / fBytesPerPixel;
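    // clearBits is 3 for 1-byte texels, 1 for 2-byte texels and 0 for 4-byte texels, so masking
    // fLeft down and rounding fRight up by it lands both edges of the dirty rect on 4-byte
    // boundaries.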
    fDirtyRect.fLeft &= ~clearBits;
    fDirtyRect.fRight += clearBits;
    fDirtyRect.fRight &= ~clearBits;
    SkASSERT(fDirtyRect.fRight <= fWidth);
    // Set up dataPtr
    dataPtr += rowBytes * fDirtyRect.fTop;
    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;

    writePixels(proxy, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
                fDirtyRect.width(), fDirtyRect.height(), fColorType, dataPtr, rowBytes);
    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

void GrDrawOpAtlas::Plot::resetRects() {
    fRectanizer.reset();

    fGenID = fGenerationCounter->next();
    fPlotLocator = CreatePlotLocator(fPageIndex, fPlotIndex, fGenID);
    fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
    fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();

    // zero out the plot
    if (fData) {
        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
    }

    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

///////////////////////////////////////////////////////////////////////////////

GrDrawOpAtlas::GrDrawOpAtlas(
        GrProxyProvider* proxyProvider, const GrBackendFormat& format,
        GrColorType colorType, int width, int height, int plotWidth, int plotHeight,
        GenerationCounter* generationCounter, AllowMultitexturing allowMultitexturing)
        : fFormat(format)
        , fColorType(colorType)
        , fTextureWidth(width)
        , fTextureHeight(height)
        , fPlotWidth(plotWidth)
        , fPlotHeight(plotHeight)
        , fGenerationCounter(generationCounter)
        , fAtlasGeneration(fGenerationCounter->next())
        , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
        , fFlushesSinceLastUse(0)
        , fMaxPages(AllowMultitexturing::kYes == allowMultitexturing ? kMaxMultitexturePages : 1)
        , fNumActivePages(0) {
    int numPlotsX = width/plotWidth;
    int numPlotsY = height/plotHeight;
    SkASSERT(numPlotsX * numPlotsY <= GrDrawOpAtlas::kMaxPlots);
    SkASSERT(fPlotWidth * numPlotsX == fTextureWidth);
    SkASSERT(fPlotHeight * numPlotsY == fTextureHeight);

    fNumPlots = numPlotsX * numPlotsY;

    this->createPages(proxyProvider, generationCounter);
}

inline void GrDrawOpAtlas::processEviction(PlotLocator plotLocator) {
    for (auto evictor : fEvictionCallbacks) {
        evictor->evict(plotLocator);
    }

    fAtlasGeneration = fGenerationCounter->next();
}

inline bool GrDrawOpAtlas::updatePlot(GrDeferredUploadTarget* target,
                                      AtlasLocator* atlasLocator, Plot* plot) {
    int pageIdx = plot->pageIndex();
    this->makeMRU(plot, pageIdx);

    // If our most recent upload has already occurred then we have to insert a new
    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
    // This new update will piggyback on that previously scheduled update.
    if (plot->lastUploadToken() < target->tokenTracker()->nextTokenToFlush()) {
        // With C++14 we could move the sk_sp into the lambda to only ref once.
        sk_sp<Plot> plotsp(SkRef(plot));

        GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
        SkASSERT(proxy && proxy->isInstantiated()); // This is occurring at flush time

        GrDeferredUploadToken lastUploadToken = target->addASAPUpload(
                [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                    plotsp->uploadToTexture(writePixels, proxy);
                });
        plot->setLastUploadToken(lastUploadToken);
    }
    atlasLocator->fPlotLocator = plot->plotLocator();
    SkDEBUGCODE(atlasLocator->validate(this);)
    return true;
}

bool GrDrawOpAtlas::uploadToPage(const GrCaps& caps, unsigned int pageIdx,
                                 GrDeferredUploadTarget* target, int width, int height,
                                 const void* image, AtlasLocator* atlasLocator) {
    SkASSERT(fViews[pageIdx].proxy() && fViews[pageIdx].proxy()->isInstantiated());

    // Look through all allocated plots for one we can share, in most-recently-used order
    PlotList::Iter plotIter;
    plotIter.init(fPages[pageIdx].fPlotList, PlotList::Iter::kHead_IterStart);

    for (Plot* plot = plotIter.get(); plot; plot = plotIter.next()) {
        SkASSERT(caps.bytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) == plot->bpp());

        if (plot->addSubImage(width, height, image, &atlasLocator->fRect)) {
            return this->updatePlot(target, atlasLocator, plot);
        }
    }

    return false;
}

// Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
//
// This value is somewhat arbitrary -- the idea is to keep it low enough that
// a page with unused plots will get removed reasonably quickly, but allow it
// to hang around for a bit in case it's needed. The assumption is that flushes
// are rare; i.e., we are not continually refreshing the frame.
static constexpr auto kPlotRecentlyUsedCount = 256;
static constexpr auto kAtlasRecentlyUsedCount = 1024;

GrDrawOpAtlas::ErrorCode GrDrawOpAtlas::addToAtlas(GrResourceProvider* resourceProvider,
                                                   GrDeferredUploadTarget* target,
                                                   int width, int height, const void* image,
                                                   AtlasLocator* atlasLocator) {
    if (width > fPlotWidth || height > fPlotHeight) {
        return ErrorCode::kError;
    }

    const GrCaps& caps = *resourceProvider->caps();

    // Look through each page to see if we can upload without having to flush.
    // We prioritize this upload to the first pages, not the most recently used, to make it easier
    // to remove unused pages in reverse page order.
    for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
        if (this->uploadToPage(caps, pageIdx, target, width, height, image, atlasLocator)) {
            return ErrorCode::kSucceeded;
        }
    }

    // If the above fails, then see if the least recently used plot per page has already been
    // flushed to the gpu if we're at max page allocation, or if the plot has aged out otherwise.
    // We wait until we've grown to the full number of pages to begin evicting already flushed
    // plots so that we can maximize the opportunity for reuse.
    // As before we prioritize this upload to the first pages, not the most recently used.
    if (fNumActivePages == this->maxPages()) {
        for (unsigned int pageIdx = 0; pageIdx < fNumActivePages; ++pageIdx) {
            Plot* plot = fPages[pageIdx].fPlotList.tail();
            SkASSERT(plot);
            if (plot->lastUseToken() < target->tokenTracker()->nextTokenToFlush()) {
                this->processEvictionAndResetRects(plot);
                SkASSERT(caps.bytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) ==
                         plot->bpp());
                SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image,
                                                             &atlasLocator->fRect);
                SkASSERT(verify);
                if (!this->updatePlot(target, atlasLocator, plot)) {
                    return ErrorCode::kError;
                }
                return ErrorCode::kSucceeded;
            }
        }
    } else {
        // If we haven't activated all the available pages, try to create a new one and add to it
        if (!this->activateNewPage(resourceProvider)) {
            return ErrorCode::kError;
        }

        if (this->uploadToPage(caps, fNumActivePages-1, target, width, height, image,
                               atlasLocator)) {
            return ErrorCode::kSucceeded;
        } else {
            // If we fail to upload to a newly activated page then something has gone terribly
            // wrong - return an error
            return ErrorCode::kError;
        }
    }

    if (!fNumActivePages) {
        return ErrorCode::kError;
    }

    // Try to find a plot that we can perform an inline upload to.
    // We prioritize this upload in reverse order of pages to counterbalance the order above.
    Plot* plot = nullptr;
    for (int pageIdx = ((int)fNumActivePages)-1; pageIdx >= 0; --pageIdx) {
        Plot* currentPlot = fPages[pageIdx].fPlotList.tail();
        if (currentPlot->lastUseToken() != target->tokenTracker()->nextDrawToken()) {
            plot = currentPlot;
            break;
        }
    }

    // If we can't find a plot that is not used in a draw currently being prepared by an op, then
    // we have to fail. This gives the op a chance to enqueue the draw, and call back into this
    // function. When that draw is enqueued, the draw token advances, and the subsequent call will
    // continue past this branch and prepare an inline upload that will occur after the enqueued
    // draw which references the plot's pre-upload content.
    if (!plot) {
        return ErrorCode::kTryAgain;
    }

    this->processEviction(plot->plotLocator());
    int pageIdx = plot->pageIndex();
    fPages[pageIdx].fPlotList.remove(plot);
    sk_sp<Plot>& newPlot = fPages[pageIdx].fPlotArray[plot->plotIndex()];
    newPlot.reset(plot->clone());

    fPages[pageIdx].fPlotList.addToHead(newPlot.get());
    SkASSERT(caps.bytesPerPixel(fViews[pageIdx].proxy()->backendFormat()) == newPlot->bpp());
    SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, &atlasLocator->fRect);
    SkASSERT(verify);

    // Note that this plot will be uploaded inline with the draws whereas the
    // one it displaced most likely was uploaded ASAP.
    // With C++14 we could move the sk_sp into the lambda to only ref once.
    sk_sp<Plot> plotsp(SkRef(newPlot.get()));

    GrTextureProxy* proxy = fViews[pageIdx].asTextureProxy();
    SkASSERT(proxy && proxy->isInstantiated());

    GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
            [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
                plotsp->uploadToTexture(writePixels, proxy);
            });
    newPlot->setLastUploadToken(lastUploadToken);

    atlasLocator->fPlotLocator = newPlot->plotLocator();
    SkDEBUGCODE(atlasLocator->validate(this);)

    return ErrorCode::kSucceeded;
}

void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
    if (fNumActivePages < 1) {
        fPrevFlushToken = startTokenForNextFlush;
        return;
    }

    // For all plots, reset the number of flushes since last use if the plot was used in this
    // flush.
    PlotList::Iter plotIter;
    bool atlasUsedThisFlush = false;
    for (uint32_t pageIndex = 0; pageIndex < fNumActivePages; ++pageIndex) {
        plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        while (Plot* plot = plotIter.get()) {
            // Reset number of flushes since used
            if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->resetFlushesSinceLastUsed();
                atlasUsedThisFlush = true;
            }

            plotIter.next();
        }
    }

    if (atlasUsedThisFlush) {
        fFlushesSinceLastUse = 0;
    } else {
        ++fFlushesSinceLastUse;
    }

    // We only try to compact if the atlas was used in the recently completed flush or
    // hasn't been used in a long time.
    // This is to handle the case where a lot of text or path rendering has occurred but then just
    // a blinking cursor is drawn.
    if (atlasUsedThisFlush || fFlushesSinceLastUse > kAtlasRecentlyUsedCount) {
        SkTArray<Plot*> availablePlots;
        uint32_t lastPageIndex = fNumActivePages - 1;

        // For all plots in the pages before the last one, update the number of flushes since
        // last use, and check to see if there are any plots the last page can safely upload to.
        for (uint32_t pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("page %d: ", pageIndex);
            }
#endif
            plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // Update number of flushes since plot was last used
                // We only increment the 'sinceLastUsed' count for flushes where the atlas was used
                // to avoid deleting everything when we return to text drawing in the blinking
                // cursor case
                if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                    plot->incFlushesSinceLastUsed();
                }

#ifdef DUMP_ATLAS_DATA
                if (gDumpAtlasData) {
                    SkDebugf("%d ", plot->flushesSinceLastUsed());
                }
#endif
                // Count plots we can potentially upload to in all pages except the last one
                // (the potential compactee).
                if (plot->flushesSinceLastUsed() > kPlotRecentlyUsedCount) {
                    availablePlots.push_back() = plot;
                }

                plotIter.next();
            }
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("\n");
            }
#endif
        }

        // Count recently used plots in the last page and evict any that are no longer in use.
        // Since we prioritize uploading to the first pages, this will eventually
        // clear out usage of this page unless we have a large need.
        plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
        unsigned int usedPlots = 0;
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("page %d: ", lastPageIndex);
        }
#endif
        while (Plot* plot = plotIter.get()) {
            // Update number of flushes since plot was last used
            if (!plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
                plot->incFlushesSinceLastUsed();
            }

#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("%d ", plot->flushesSinceLastUsed());
            }
#endif
            // If this plot was used recently
            if (plot->flushesSinceLastUsed() <= kPlotRecentlyUsedCount) {
                usedPlots++;
            } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
                // otherwise if aged out just evict it.
                this->processEvictionAndResetRects(plot);
            }
            plotIter.next();
        }
#ifdef DUMP_ATLAS_DATA
        if (gDumpAtlasData) {
            SkDebugf("\n");
        }
#endif

        // If recently used plots in the last page are using less than a quarter of the page, try
        // to evict them if there's available space in earlier pages. Since we prioritize uploading
        // to the first pages, this will eventually clear out usage of this page unless we have a
        // large need.
        if (availablePlots.count() && usedPlots && usedPlots <= fNumPlots / 4) {
            plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
            while (Plot* plot = plotIter.get()) {
                // If this plot was used recently
                if (plot->flushesSinceLastUsed() <= kPlotRecentlyUsedCount) {
                    // See if there's room in an earlier page and if so evict.
                    // We need to be somewhat harsh here so that a handful of plots that are
                    // consistently in use don't end up locking the page in memory.
                    if (availablePlots.count() > 0) {
                        this->processEvictionAndResetRects(plot);
                        this->processEvictionAndResetRects(availablePlots.back());
                        availablePlots.pop_back();
                        --usedPlots;
                    }
                    if (!usedPlots || !availablePlots.count()) {
                        break;
                    }
                }
                plotIter.next();
            }
        }

        // If none of the plots in the last page have been used recently, delete it.
        if (!usedPlots) {
#ifdef DUMP_ATLAS_DATA
            if (gDumpAtlasData) {
                SkDebugf("delete %d\n", fNumActivePages-1);
            }
#endif
            this->deactivateLastPage();
            fFlushesSinceLastUse = 0;
        }
    }

    fPrevFlushToken = startTokenForNextFlush;
}

bool GrDrawOpAtlas::createPages(
        GrProxyProvider* proxyProvider, GenerationCounter* generationCounter) {
    SkASSERT(SkIsPow2(fTextureWidth) && SkIsPow2(fTextureHeight));

    SkISize dims = {fTextureWidth, fTextureHeight};

    int numPlotsX = fTextureWidth/fPlotWidth;
    int numPlotsY = fTextureHeight/fPlotHeight;

    for (uint32_t i = 0; i < this->maxPages(); ++i) {
        GrSwizzle swizzle = proxyProvider->caps()->getReadSwizzle(fFormat, fColorType);
        sk_sp<GrSurfaceProxy> proxy = proxyProvider->createProxy(
                fFormat, dims, GrRenderable::kNo, 1, GrMipMapped::kNo, SkBackingFit::kExact,
                SkBudgeted::kYes, GrProtected::kNo, GrInternalSurfaceFlags::kNone,
                GrSurfaceProxy::UseAllocator::kNo);
        if (!proxy) {
            return false;
        }
        fViews[i] = GrSurfaceProxyView(std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle);

        // set up allocated plots
        fPages[i].fPlotArray.reset(new sk_sp<Plot>[ numPlotsX * numPlotsY ]);

        sk_sp<Plot>* currPlot = fPages[i].fPlotArray.get();
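        // Plots are created from the bottom-right corner of the page towards the top-left, so
        // plotIndex 0 is the bottom-right plot; AtlasLocator::validate() relies on this ordering
        // when it reconstructs the plot index from a rectangle's position.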
        for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
            for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
                uint32_t plotIndex = r * numPlotsX + c;
                currPlot->reset(new Plot(
                        i, plotIndex, generationCounter, x, y, fPlotWidth, fPlotHeight, fColorType));

                // build LRU list
                fPages[i].fPlotList.addToHead(currPlot->get());
                ++currPlot;
            }
        }

    }

    return true;
}


bool GrDrawOpAtlas::activateNewPage(GrResourceProvider* resourceProvider) {
    SkASSERT(fNumActivePages < this->maxPages());

    if (!fViews[fNumActivePages].proxy()->instantiate(resourceProvider)) {
        return false;
    }

#ifdef DUMP_ATLAS_DATA
    if (gDumpAtlasData) {
        SkDebugf("activated page#: %d\n", fNumActivePages);
    }
#endif

    ++fNumActivePages;
    return true;
}


inline void GrDrawOpAtlas::deactivateLastPage() {
    SkASSERT(fNumActivePages);

    uint32_t lastPageIndex = fNumActivePages - 1;

    int numPlotsX = fTextureWidth/fPlotWidth;
    int numPlotsY = fTextureHeight/fPlotHeight;

    fPages[lastPageIndex].fPlotList.reset();
    for (int r = 0; r < numPlotsY; ++r) {
        for (int c = 0; c < numPlotsX; ++c) {
            uint32_t plotIndex = r * numPlotsX + c;

            Plot* currPlot = fPages[lastPageIndex].fPlotArray[plotIndex].get();
            currPlot->resetRects();
            currPlot->resetFlushesSinceLastUsed();

            // rebuild the LRU list
            SkDEBUGCODE(currPlot->fPrev = currPlot->fNext = nullptr);
            SkDEBUGCODE(currPlot->fList = nullptr);
            fPages[lastPageIndex].fPlotList.addToHead(currPlot);
        }
    }

    // remove ref to the backing texture
    fViews[lastPageIndex].proxy()->deinstantiate();
    --fNumActivePages;
}

GrDrawOpAtlasConfig::GrDrawOpAtlasConfig(int maxTextureSize, size_t maxBytes) {
    static const SkISize kARGBDimensions[] = {
        {256, 256},   // maxBytes < 2^19
        {512, 256},   // 2^19 <= maxBytes < 2^20
        {512, 512},   // 2^20 <= maxBytes < 2^21
        {1024, 512},  // 2^21 <= maxBytes < 2^22
        {1024, 1024}, // 2^22 <= maxBytes < 2^23
        {2048, 1024}, // 2^23 <= maxBytes
    };

    // Index 0 corresponds to maxBytes of 2^18, so start by dividing it by that
    maxBytes >>= 18;
    // Take the floor of the log to get the index
    int index = maxBytes > 0
        ? SkTPin<int>(SkPrevLog2(maxBytes), 0, SK_ARRAY_COUNT(kARGBDimensions) - 1)
        : 0;
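    // For example, with maxBytes = 2^22 (4MB): 2^22 >> 18 is 16, SkPrevLog2(16) is 4, and
    // kARGBDimensions[4] is {1024, 1024}.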

    SkASSERT(kARGBDimensions[index].width() <= kMaxAtlasDim);
    SkASSERT(kARGBDimensions[index].height() <= kMaxAtlasDim);
    fARGBDimensions.set(std::min<int>(kARGBDimensions[index].width(), maxTextureSize),
                        std::min<int>(kARGBDimensions[index].height(), maxTextureSize));
    fMaxTextureSize = std::min<int>(maxTextureSize, kMaxAtlasDim);
}

SkISize GrDrawOpAtlasConfig::atlasDimensions(GrMaskFormat type) const {
    if (kA8_GrMaskFormat == type) {
        // A8 is always 2x the ARGB dimensions, clamped to the max allowed texture size
        return { std::min<int>(2 * fARGBDimensions.width(), fMaxTextureSize),
                 std::min<int>(2 * fARGBDimensions.height(), fMaxTextureSize) };
    } else {
        return fARGBDimensions;
    }
}

SkISize GrDrawOpAtlasConfig::plotDimensions(GrMaskFormat type) const {
    if (kA8_GrMaskFormat == type) {
        SkISize atlasDimensions = this->atlasDimensions(type);
        // For A8 we want to grow the plots at larger texture sizes to accept more of the
        // larger SDF glyphs. Since the largest SDF glyph can be 170x170 with padding, this
        // allows us to pack 3 in a 512x256 plot, or 9 in a 512x512 plot.

        // This will give us 512x256 plots for 2048x1024, 512x512 plots for 2048x2048,
        // and 256x256 plots otherwise.
        int plotWidth = atlasDimensions.width() >= 2048 ? 512 : 256;
        int plotHeight = atlasDimensions.height() >= 2048 ? 512 : 256;

        return { plotWidth, plotHeight };
    } else {
        // ARGB and LCD always use 256x256 plots -- this has been shown to be faster
        return { 256, 256 };
    }
}

constexpr int GrDrawOpAtlasConfig::kMaxAtlasDim;