1 | /**************************************************************************/ |
2 | /* geometry_2d.cpp */ |
3 | /**************************************************************************/ |
4 | /* This file is part of: */ |
5 | /* GODOT ENGINE */ |
6 | /* https://godotengine.org */ |
7 | /**************************************************************************/ |
8 | /* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */ |
9 | /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */ |
10 | /* */ |
11 | /* Permission is hereby granted, free of charge, to any person obtaining */ |
12 | /* a copy of this software and associated documentation files (the */ |
13 | /* "Software"), to deal in the Software without restriction, including */ |
14 | /* without limitation the rights to use, copy, modify, merge, publish, */ |
15 | /* distribute, sublicense, and/or sell copies of the Software, and to */ |
16 | /* permit persons to whom the Software is furnished to do so, subject to */ |
17 | /* the following conditions: */ |
18 | /* */ |
19 | /* The above copyright notice and this permission notice shall be */ |
20 | /* included in all copies or substantial portions of the Software. */ |
21 | /* */ |
22 | /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ |
23 | /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ |
24 | /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */ |
25 | /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ |
26 | /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ |
27 | /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ |
28 | /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ |
29 | /**************************************************************************/ |
30 | |
31 | #include "geometry_2d.h" |
32 | |
33 | #include "thirdparty/misc/clipper.hpp" |
34 | #include "thirdparty/misc/polypartition.h" |
35 | #define STB_RECT_PACK_IMPLEMENTATION |
36 | #include "thirdparty/misc/stb_rect_pack.h" |
37 | |
38 | #define SCALE_FACTOR 100000.0 // Based on CMP_EPSILON. |
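// Example of the fixed-point round trip used below: with SCALE_FACTOR 100000.0 a
// coordinate of 1.5 is handed to Clipper as the integer 150000 and divided back to
// 1.5 when results are read out, so the quantization error per coordinate is at most
// 1.0 / SCALE_FACTOR.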
39 | |
40 | Vector<Vector<Vector2>> Geometry2D::decompose_polygon_in_convex(Vector<Point2> polygon) { |
41 | Vector<Vector<Vector2>> decomp; |
42 | List<TPPLPoly> in_poly, out_poly; |
43 | |
44 | TPPLPoly inp; |
45 | inp.Init(polygon.size()); |
46 | for (int i = 0; i < polygon.size(); i++) { |
47 | inp.GetPoint(i) = polygon[i]; |
48 | } |
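	// polypartition expects counterclockwise winding for non-hole polygons, so normalize the
	// input orientation here instead of requiring callers to provide it.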
49 | inp.SetOrientation(TPPL_ORIENTATION_CCW); |
50 | in_poly.push_back(inp); |
51 | TPPLPartition tpart; |
52 | if (tpart.ConvexPartition_HM(&in_poly, &out_poly) == 0) { // Failed. |
		ERR_PRINT("Convex decomposition failed!");
54 | return decomp; |
55 | } |
56 | |
57 | decomp.resize(out_poly.size()); |
58 | int idx = 0; |
59 | for (List<TPPLPoly>::Element *I = out_poly.front(); I; I = I->next()) { |
60 | TPPLPoly &tp = I->get(); |
61 | |
62 | decomp.write[idx].resize(tp.GetNumPoints()); |
63 | |
64 | for (int64_t i = 0; i < tp.GetNumPoints(); i++) { |
65 | decomp.write[idx].write[i] = tp.GetPoint(i); |
66 | } |
67 | |
68 | idx++; |
69 | } |
70 | |
71 | return decomp; |
72 | } |
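// Minimal usage sketch for the decomposition above (the L-shaped outline is made up
// for illustration): the input must be a simple polygon, and each returned Vector is
// one convex piece of it.
//
//	Vector<Point2> outline;
//	outline.push_back(Point2(0, 0));
//	outline.push_back(Point2(4, 0));
//	outline.push_back(Point2(4, 2));
//	outline.push_back(Point2(2, 2));
//	outline.push_back(Point2(2, 4));
//	outline.push_back(Point2(0, 4));
//	Vector<Vector<Vector2>> convex_parts = Geometry2D::decompose_polygon_in_convex(outline);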
73 | |
74 | struct _AtlasWorkRect { |
75 | Size2i s; |
76 | Point2i p; |
77 | int idx = 0; |
	// Inverted comparison so that sort() orders the widest rectangles first.
	_FORCE_INLINE_ bool operator<(const _AtlasWorkRect &p_r) const { return s.width > p_r.s.width; }
79 | }; |
80 | |
81 | struct _AtlasWorkRectResult { |
82 | Vector<_AtlasWorkRect> result; |
83 | int max_w = 0; |
84 | int max_h = 0; |
85 | }; |
86 | |
87 | void Geometry2D::make_atlas(const Vector<Size2i> &p_rects, Vector<Point2i> &r_result, Size2i &r_size) { |
	// Super simple, almost brute-force scanline stacking fitter.
	// It's pretty basic for now, but it tries to keep the aspect ratio of the
	// resulting atlas roughly square. This matters because video cards have limits
	// on texture size (usually 2048 or 4096), so the squarer a texture is, the more
	// likely it is to work on all hardware.
	// For example, it will prefer a 1024x1024 atlas (works everywhere) over a
	// 256x8192 atlas (won't work anywhere).
95 | |
96 | ERR_FAIL_COND(p_rects.size() == 0); |
97 | for (int i = 0; i < p_rects.size(); i++) { |
98 | ERR_FAIL_COND(p_rects[i].width <= 0); |
99 | ERR_FAIL_COND(p_rects[i].height <= 0); |
100 | } |
101 | |
102 | Vector<_AtlasWorkRect> wrects; |
103 | wrects.resize(p_rects.size()); |
104 | for (int i = 0; i < p_rects.size(); i++) { |
105 | wrects.write[i].s = p_rects[i]; |
106 | wrects.write[i].idx = i; |
107 | } |
108 | wrects.sort(); |
109 | int widest = wrects[0].s.width; |
110 | |
111 | Vector<_AtlasWorkRectResult> results; |
112 | |
	for (int i = 0; i <= 12; i++) { // Try power-of-two atlas widths from 1 (1 << 0) up to 4096 (1 << 12); widths narrower than the widest rectangle are skipped below.
		int w = 1 << i;
115 | int max_h = 0; |
116 | int max_w = 0; |
117 | if (w < widest) { |
118 | continue; |
119 | } |
120 | |
121 | Vector<int> hmax; |
122 | hmax.resize(w); |
123 | for (int j = 0; j < w; j++) { |
124 | hmax.write[j] = 0; |
125 | } |
126 | |
		// Place the rectangles: each stacks on top of the current column until the column
		// reaches the height limit set by the rectangle placed at offset 0, then the offset
		// advances, wrapping back to 0 when the next rectangle would not fit in the width.
128 | int ofs = 0; |
129 | int limit_h = 0; |
130 | for (int j = 0; j < wrects.size(); j++) { |
131 | if (ofs + wrects[j].s.width > w) { |
132 | ofs = 0; |
133 | } |
134 | |
135 | int from_y = 0; |
136 | for (int k = 0; k < wrects[j].s.width; k++) { |
137 | if (hmax[ofs + k] > from_y) { |
138 | from_y = hmax[ofs + k]; |
139 | } |
140 | } |
141 | |
142 | wrects.write[j].p.x = ofs; |
143 | wrects.write[j].p.y = from_y; |
144 | int end_h = from_y + wrects[j].s.height; |
145 | int end_w = ofs + wrects[j].s.width; |
146 | if (ofs == 0) { |
147 | limit_h = end_h; |
148 | } |
149 | |
150 | for (int k = 0; k < wrects[j].s.width; k++) { |
151 | hmax.write[ofs + k] = end_h; |
152 | } |
153 | |
154 | if (end_h > max_h) { |
155 | max_h = end_h; |
156 | } |
157 | |
158 | if (end_w > max_w) { |
159 | max_w = end_w; |
160 | } |
161 | |
			if (ofs == 0 || end_h > limit_h) { // Column reached its height limit (or this rect started it at offset 0): advance; otherwise keep stacking at this offset.
163 | ofs += wrects[j].s.width; |
164 | } |
165 | } |
166 | |
167 | _AtlasWorkRectResult result; |
168 | result.result = wrects; |
169 | result.max_h = max_h; |
170 | result.max_w = max_w; |
171 | results.push_back(result); |
172 | } |
173 | |
174 | // Find the result with the best aspect ratio. |
175 | |
176 | int best = -1; |
177 | real_t best_aspect = 1e20; |
178 | |
179 | for (int i = 0; i < results.size(); i++) { |
180 | real_t h = next_power_of_2(results[i].max_h); |
181 | real_t w = next_power_of_2(results[i].max_w); |
182 | real_t aspect = h > w ? h / w : w / h; |
183 | if (aspect < best_aspect) { |
184 | best = i; |
185 | best_aspect = aspect; |
186 | } |
187 | } |
188 | |
189 | r_result.resize(p_rects.size()); |
190 | |
191 | for (int i = 0; i < p_rects.size(); i++) { |
192 | r_result.write[results[best].result[i].idx] = results[best].result[i].p; |
193 | } |
194 | |
195 | r_size = Size2(results[best].max_w, results[best].max_h); |
196 | } |
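// Minimal usage sketch (sizes are illustrative): positions come back in the same order
// as the input sizes, and r_size is the tight bounding size of the packed layout (it is
// not rounded up to a power of two here).
//
//	Vector<Size2i> sizes;
//	sizes.push_back(Size2i(64, 64));
//	sizes.push_back(Size2i(32, 128));
//	sizes.push_back(Size2i(16, 16));
//	Vector<Point2i> positions;
//	Size2i atlas_size;
//	Geometry2D::make_atlas(sizes, positions, atlas_size);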
197 | |
198 | Vector<Vector<Point2>> Geometry2D::_polypaths_do_operation(PolyBooleanOperation p_op, const Vector<Point2> &p_polypath_a, const Vector<Point2> &p_polypath_b, bool is_a_open) { |
199 | using namespace ClipperLib; |
200 | |
201 | ClipType op = ctUnion; |
202 | |
203 | switch (p_op) { |
204 | case OPERATION_UNION: |
205 | op = ctUnion; |
206 | break; |
207 | case OPERATION_DIFFERENCE: |
208 | op = ctDifference; |
209 | break; |
210 | case OPERATION_INTERSECTION: |
211 | op = ctIntersection; |
212 | break; |
213 | case OPERATION_XOR: |
214 | op = ctXor; |
215 | break; |
216 | } |
217 | Path path_a, path_b; |
218 | |
219 | // Need to scale points (Clipper's requirement for robust computation). |
220 | for (int i = 0; i != p_polypath_a.size(); ++i) { |
221 | path_a << IntPoint(p_polypath_a[i].x * (real_t)SCALE_FACTOR, p_polypath_a[i].y * (real_t)SCALE_FACTOR); |
222 | } |
223 | for (int i = 0; i != p_polypath_b.size(); ++i) { |
224 | path_b << IntPoint(p_polypath_b[i].x * (real_t)SCALE_FACTOR, p_polypath_b[i].y * (real_t)SCALE_FACTOR); |
225 | } |
226 | Clipper clp; |
227 | clp.AddPath(path_a, ptSubject, !is_a_open); // Forward compatible with Clipper 10.0.0. |
228 | clp.AddPath(path_b, ptClip, true); // Polylines cannot be set as clip. |
229 | |
230 | Paths paths; |
231 | |
232 | if (is_a_open) { |
233 | PolyTree tree; // Needed to populate polylines. |
234 | clp.Execute(op, tree); |
235 | OpenPathsFromPolyTree(tree, paths); |
236 | } else { |
237 | clp.Execute(op, paths); // Works on closed polygons only. |
238 | } |
239 | // Have to scale points down now. |
240 | Vector<Vector<Point2>> polypaths; |
241 | |
242 | for (Paths::size_type i = 0; i < paths.size(); ++i) { |
243 | Vector<Vector2> polypath; |
244 | |
245 | const Path &scaled_path = paths[i]; |
246 | |
247 | for (Paths::size_type j = 0; j < scaled_path.size(); ++j) { |
248 | polypath.push_back(Point2( |
249 | static_cast<real_t>(scaled_path[j].X) / (real_t)SCALE_FACTOR, |
250 | static_cast<real_t>(scaled_path[j].Y) / (real_t)SCALE_FACTOR)); |
251 | } |
252 | polypaths.push_back(polypath); |
253 | } |
254 | return polypaths; |
255 | } |
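// Illustrative call (coordinates made up). In engine code this helper is normally
// reached through the boolean-operation wrappers declared in geometry_2d.h rather
// than called directly; a union of two overlapping squares would look like:
//
//	Vector<Point2> a, b;
//	a.push_back(Point2(0, 0));
//	a.push_back(Point2(10, 0));
//	a.push_back(Point2(10, 10));
//	a.push_back(Point2(0, 10));
//	b.push_back(Point2(5, 5));
//	b.push_back(Point2(15, 5));
//	b.push_back(Point2(15, 15));
//	b.push_back(Point2(5, 15));
//	Vector<Vector<Point2>> merged = _polypaths_do_operation(OPERATION_UNION, a, b, false);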
256 | |
257 | Vector<Vector<Point2>> Geometry2D::_polypath_offset(const Vector<Point2> &p_polypath, real_t p_delta, PolyJoinType p_join_type, PolyEndType p_end_type) { |
258 | using namespace ClipperLib; |
259 | |
260 | JoinType jt = jtSquare; |
261 | |
262 | switch (p_join_type) { |
263 | case JOIN_SQUARE: |
264 | jt = jtSquare; |
265 | break; |
266 | case JOIN_ROUND: |
267 | jt = jtRound; |
268 | break; |
269 | case JOIN_MITER: |
270 | jt = jtMiter; |
271 | break; |
272 | } |
273 | |
274 | EndType et = etClosedPolygon; |
275 | |
276 | switch (p_end_type) { |
277 | case END_POLYGON: |
278 | et = etClosedPolygon; |
279 | break; |
280 | case END_JOINED: |
281 | et = etClosedLine; |
282 | break; |
283 | case END_BUTT: |
284 | et = etOpenButt; |
285 | break; |
286 | case END_SQUARE: |
287 | et = etOpenSquare; |
288 | break; |
289 | case END_ROUND: |
290 | et = etOpenRound; |
291 | break; |
292 | } |
293 | ClipperOffset co(2.0, 0.25f * (real_t)SCALE_FACTOR); // Defaults from ClipperOffset. |
294 | Path path; |
295 | |
296 | // Need to scale points (Clipper's requirement for robust computation). |
297 | for (int i = 0; i != p_polypath.size(); ++i) { |
298 | path << IntPoint(p_polypath[i].x * (real_t)SCALE_FACTOR, p_polypath[i].y * (real_t)SCALE_FACTOR); |
299 | } |
300 | co.AddPath(path, jt, et); |
301 | |
302 | Paths paths; |
303 | co.Execute(paths, p_delta * (real_t)SCALE_FACTOR); // Inflate/deflate. |
304 | |
305 | // Have to scale points down now. |
306 | Vector<Vector<Point2>> polypaths; |
307 | |
308 | for (Paths::size_type i = 0; i < paths.size(); ++i) { |
309 | Vector<Vector2> polypath; |
310 | |
311 | const Path &scaled_path = paths[i]; |
312 | |
313 | for (Paths::size_type j = 0; j < scaled_path.size(); ++j) { |
314 | polypath.push_back(Point2( |
315 | static_cast<real_t>(scaled_path[j].X) / (real_t)SCALE_FACTOR, |
316 | static_cast<real_t>(scaled_path[j].Y) / (real_t)SCALE_FACTOR)); |
317 | } |
318 | polypaths.push_back(polypath); |
319 | } |
320 | return polypaths; |
321 | } |
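// Illustrative call (values made up; like the helper above, this is normally reached
// through the offset wrappers declared in geometry_2d.h): grow a closed 10x10 square
// outward by 2 units with rounded corners.
//
//	Vector<Point2> square;
//	square.push_back(Point2(0, 0));
//	square.push_back(Point2(10, 0));
//	square.push_back(Point2(10, 10));
//	square.push_back(Point2(0, 10));
//	Vector<Vector<Point2>> grown = _polypath_offset(square, 2.0, JOIN_ROUND, END_POLYGON);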
322 | |
323 | Vector<Vector3i> Geometry2D::partial_pack_rects(const Vector<Vector2i> &p_sizes, const Size2i &p_atlas_size) { |
324 | Vector<stbrp_node> nodes; |
325 | nodes.resize(p_atlas_size.width); |
326 | memset(nodes.ptrw(), 0, sizeof(stbrp_node) * nodes.size()); |
327 | |
328 | stbrp_context context; |
329 | stbrp_init_target(&context, p_atlas_size.width, p_atlas_size.height, nodes.ptrw(), p_atlas_size.width); |
330 | |
331 | Vector<stbrp_rect> rects; |
332 | rects.resize(p_sizes.size()); |
333 | |
334 | for (int i = 0; i < p_sizes.size(); i++) { |
335 | rects.write[i].id = i; |
336 | rects.write[i].w = p_sizes[i].width; |
337 | rects.write[i].h = p_sizes[i].height; |
338 | rects.write[i].x = 0; |
339 | rects.write[i].y = 0; |
340 | rects.write[i].was_packed = 0; |
341 | } |
342 | |
343 | stbrp_pack_rects(&context, rects.ptrw(), rects.size()); |
344 | |
345 | Vector<Vector3i> ret; |
346 | ret.resize(p_sizes.size()); |
347 | |
348 | for (int i = 0; i < p_sizes.size(); i++) { |
349 | ret.write[rects[i].id] = Vector3i(rects[i].x, rects[i].y, rects[i].was_packed != 0 ? 1 : 0); |
350 | } |
351 | |
352 | return ret; |
353 | } |
354 | |
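// Illustrative call (sizes made up): try to fit two rectangles into a fixed 256x256
// target; each returned Vector3i holds (x, y, packed), where packed == 0 means the
// rectangle did not fit and its position should be ignored.
//
//	Vector<Vector2i> sizes;
//	sizes.push_back(Vector2i(100, 80));
//	sizes.push_back(Vector2i(200, 200));
//	Vector<Vector3i> packed = Geometry2D::partial_pack_rects(sizes, Size2i(256, 256));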