author      Jacek Antonelli    2008-08-15 23:44:50 -0500
committer   Jacek Antonelli    2008-08-15 23:44:50 -0500
commit      89fe5dab825a62a0e3fd8d248cbc91c65eb2a426 (patch)
tree        bcff14b7888d04a2fec799c59369f6095224bd08 /linden/indra/llrender/llvertexbuffer.cpp
parent      Second Life viewer sources 1.13.3.2 (diff)
download    meta-impy-89fe5dab825a62a0e3fd8d248cbc91c65eb2a426.zip
            meta-impy-89fe5dab825a62a0e3fd8d248cbc91c65eb2a426.tar.gz
            meta-impy-89fe5dab825a62a0e3fd8d248cbc91c65eb2a426.tar.bz2
            meta-impy-89fe5dab825a62a0e3fd8d248cbc91c65eb2a426.tar.xz

Second Life viewer sources 1.14.0.0

Diffstat (limited to 'linden/indra/llrender/llvertexbuffer.cpp')
-rw-r--r--   linden/indra/llrender/llvertexbuffer.cpp   922
1 files changed, 922 insertions, 0 deletions
diff --git a/linden/indra/llrender/llvertexbuffer.cpp b/linden/indra/llrender/llvertexbuffer.cpp
new file mode 100644
index 0000000..b94f593
--- /dev/null
+++ b/linden/indra/llrender/llvertexbuffer.cpp
@@ -0,0 +1,922 @@
#include "linden_common.h"

#include "llvertexbuffer.h"
// #include "llrender.h"
#include "llglheaders.h"
#include "llmemory.h"
#include "llmemtype.h"

//============================================================================

//static
S32 LLVertexBuffer::sCount = 0;
S32 LLVertexBuffer::sGLCount = 0;
BOOL LLVertexBuffer::sEnableVBOs = TRUE;
U32 LLVertexBuffer::sGLRenderBuffer = 0;
U32 LLVertexBuffer::sGLRenderIndices = 0;
U32 LLVertexBuffer::sLastMask = 0;
BOOL LLVertexBuffer::sVBOActive = FALSE;
BOOL LLVertexBuffer::sIBOActive = FALSE;
U32 LLVertexBuffer::sAllocatedBytes = 0;
BOOL LLVertexBuffer::sRenderActive = FALSE;

std::vector<U32> LLVertexBuffer::sDeleteList;
LLVertexBuffer::buffer_list_t LLVertexBuffer::sLockedList;

S32 LLVertexBuffer::sTypeOffsets[LLVertexBuffer::TYPE_MAX] =
{
	sizeof(LLVector3), // TYPE_VERTEX,
	sizeof(LLVector3), // TYPE_NORMAL,
	sizeof(LLVector2), // TYPE_TEXCOORD,
	sizeof(LLVector2), // TYPE_TEXCOORD2,
	sizeof(LLColor4U), // TYPE_COLOR,
	sizeof(LLVector3), // TYPE_BINORMAL,
	sizeof(F32), // TYPE_WEIGHT,
	sizeof(LLVector4), // TYPE_CLOTHWEIGHT,
};
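
// sTypeOffsets holds the per-vertex size of each attribute type.  The
// constructor walks this table to build an interleaved layout: each enabled
// attribute gets the running total as its offset, and the final total becomes
// the per-vertex stride.  For example (assuming LLVector3 packs three F32s,
// LLVector2 two F32s, and LLColor4U four U8s), a buffer created with
// vertex + normal + texcoord + color data would lay out as:
//
//   offset  0: position  (12 bytes)
//   offset 12: normal    (12 bytes)
//   offset 24: texcoord  ( 8 bytes)
//   offset 32: color     ( 4 bytes)
//   stride    = 36 bytes per vertex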

//static
void LLVertexBuffer::initClass(bool use_vbo)
{
	sEnableVBOs = use_vbo;
}

//static
void LLVertexBuffer::unbind()
{
	if (sVBOActive)
	{
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		sVBOActive = FALSE;
	}
	if (sIBOActive)
	{
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		sIBOActive = FALSE;
	}

	sGLRenderBuffer = 0;
	sGLRenderIndices = 0;
}

//static
void LLVertexBuffer::cleanupClass()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	sLockedList.clear();
	startRender();
	stopRender();
	clientCopy(); // deletes GL buffers
}

//static, call before rendering VBOs
void LLVertexBuffer::startRender()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (sEnableVBOs)
	{
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		sVBOActive = FALSE;
		sIBOActive = FALSE;
	}

	sRenderActive = TRUE;
	sGLRenderBuffer = 0;
	sGLRenderIndices = 0;
	sLastMask = 0;
}

void LLVertexBuffer::stopRender()
{
	sRenderActive = FALSE;
}

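// clientCopy() first flushes any GL buffer names queued on sDeleteList, then
// walks the locked-buffer list, pushing each locked buffer's client-memory
// copy into its VBO via setBuffer(0)/unmapBuffer().  The timer skips the first
// copy (so a pipeline stall isn't counted against the budget) and the walk
// stops once max_time seconds have elapsed, spreading uploads over successive
// calls; buffers processed so far are removed from the front of the list.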
void LLVertexBuffer::clientCopy(F64 max_time)
{
	if (!sDeleteList.empty())
	{
		size_t num = sDeleteList.size();
		glDeleteBuffersARB(sDeleteList.size(), (GLuint*) &(sDeleteList[0]));
		sDeleteList.clear();
		sGLCount -= num;
	}

	if (sEnableVBOs)
	{
		LLTimer timer;
		BOOL reset = TRUE;
		buffer_list_t::iterator iter = sLockedList.begin();
		while(iter != sLockedList.end())
		{
			LLVertexBuffer* buffer = *iter;
			if (buffer->isLocked() && buffer->useVBOs())
			{
				buffer->setBuffer(0);
			}
			++iter;
			if (reset)
			{
				reset = FALSE;
				timer.reset(); //skip first copy (don't count pipeline stall)
			}
			else
			{
				if (timer.getElapsedTimeF64() > max_time)
				{
					break;
				}
			}

		}

		sLockedList.erase(sLockedList.begin(), iter);
	}
}

//----------------------------------------------------------------------------

// For debugging
struct VTNC /// Simple
{
	F32 v1,v2,v3;
	F32 n1,n2,n3;
	F32 t1,t2;
	U32 c;
};
static VTNC dbg_vtnc;

struct VTUNCB // Simple + Bump
{
	F32 v1,v2,v3;
	F32 n1,n2,n3;
	F32 t1,t2;
	F32 u1,u2;
	F32 b1,b2,b3;
	U32 c;
};
static VTUNCB dbg_vtuncb;

struct VTUNC // Surfacepatch
{
	F32 v1,v2,v3;
	F32 n1,n2,n3;
	F32 t1,t2;
	F32 u1,u2;
	U32 c;
};
static VTUNC dbg_vtunc;

struct VTNW /// Avatar
{
	F32 v1,v2,v3;
	F32 n1,n2,n3;
	F32 t1,t2;
	F32 w;
};
static VTNW dbg_vtnw;

struct VTNPAD /// Avatar Output
{
	F32 v1,v2,v3,p1;
	F32 n1,n2,n3,p2;
	F32 t1,t2,p3,p4;
};
static VTNPAD dbg_vtnpad;

//----------------------------------------------------------------------------

LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
	LLRefCount(),
	mNumVerts(0), mNumIndices(0), mUsage(usage), mGLBuffer(0), mGLIndices(0),
	mMappedData(NULL),
	mMappedIndexData(NULL), mLocked(FALSE),
	mFinal(FALSE),
	mFilthy(FALSE),
	mEmpty(TRUE),
	mResized(FALSE)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (!sEnableVBOs)
	{
		mUsage = GL_STREAM_DRAW_ARB;
	}

	S32 stride = 0;
	for (S32 i=0; i<TYPE_MAX; i++)
	{
		U32 mask = 1<<i;
		if (typemask & mask)
		{
			mOffsets[i] = stride;
			stride += sTypeOffsets[i];
		}
	}
	mTypeMask = typemask;
	mStride = stride;
	sCount++;
}
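
// Typical life cycle (sketch only; LLPointer is assumed to be the ref-counting
// smart pointer used with LLRefCount elsewhere in the codebase, everything
// else below is defined in this file or its header):
//
//   LLPointer<LLVertexBuffer> vb = new LLVertexBuffer(
//       LLVertexBuffer::MAP_VERTEX | LLVertexBuffer::MAP_TEXCOORD,
//       GL_STATIC_DRAW_ARB);
//   vb->allocateBuffer(num_verts, num_indices, TRUE); // client copy + GL names
//
//   LLStrider<LLVector3> verts;
//   LLStrider<LLVector2> tcs;
//   vb->getVertexStrider(verts);   // implicitly maps the buffer
//   vb->getTexCoordStrider(tcs);
//   // ... write vertex data through the striders ...
//
//   vb->setBuffer(LLVertexBuffer::MAP_VERTEX | LLVertexBuffer::MAP_TEXCOORD);
//   // ... draw calls ...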

// protected, use unref()
//virtual
LLVertexBuffer::~LLVertexBuffer()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	destroyGLBuffer();
	destroyGLIndices();
	sCount--;

	if (mLocked)
	{
		//pull off of locked list
		for (buffer_list_t::iterator i = sLockedList.begin(); i != sLockedList.end(); ++i)
		{
			if (*i == this)
			{
				sLockedList.erase(i);
				break;
			}
		}
	}
};

//----------------------------------------------------------------------------

void LLVertexBuffer::createGLBuffer()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);

	U32 size = getSize();
	if (mGLBuffer)
	{
		destroyGLBuffer();
	}

	if (size == 0)
	{
		return;
	}

	mMappedData = new U8[size];
	memset(mMappedData, 0, size);
	mEmpty = TRUE;

	if (useVBOs())
	{
		glGenBuffersARB(1, (GLuint*) &mGLBuffer);
		mResized = TRUE;
		sGLCount++;
	}
	else
	{
		static int gl_buffer_idx = 0;
		mGLBuffer = ++gl_buffer_idx;
	}
}

void LLVertexBuffer::createGLIndices()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	U32 size = getIndicesSize();

	if (mGLIndices)
	{
		destroyGLIndices();
	}

	if (size == 0)
	{
		return;
	}

	mMappedIndexData = new U8[size];
	memset(mMappedIndexData, 0, size);
	mEmpty = TRUE;

	if (useVBOs())
	{
		glGenBuffersARB(1, (GLuint*) &mGLIndices);
		mResized = TRUE;
		sGLCount++;
	}
	else
	{
		static int gl_buffer_idx = 0;
		mGLIndices = ++gl_buffer_idx;
	}
}
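
// Both create*() calls allocate a zero-filled client-memory shadow copy and,
// when VBOs are in use, generate a GL buffer name and flag the buffer as
// resized so the first unmapBuffer() does a full glBufferDataARB upload.
// Without VBOs, a fake non-zero id from a local counter stands in for the GL
// name so the rest of the code can treat "buffer exists" uniformly.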

void LLVertexBuffer::destroyGLBuffer()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (mGLBuffer)
	{
		if (useVBOs())
		{
			sDeleteList.push_back(mGLBuffer);
		}

		delete [] mMappedData;
		mMappedData = NULL;
		mEmpty = TRUE;
		sAllocatedBytes -= getSize();
	}

	mGLBuffer = 0;
}

void LLVertexBuffer::destroyGLIndices()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (mGLIndices)
	{
		if (useVBOs())
		{
			sDeleteList.push_back(mGLIndices);
		}

		delete [] mMappedIndexData;
		mMappedIndexData = NULL;
		mEmpty = TRUE;
		sAllocatedBytes -= getIndicesSize();
	}

	mGLIndices = 0;
}

void LLVertexBuffer::updateNumVerts(S32 nverts)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (!mDynamicSize)
	{
		mNumVerts = nverts;
	}
	else if (mUsage == GL_STATIC_DRAW_ARB ||
		nverts > mNumVerts ||
		nverts < mNumVerts/2)
	{
		if (mUsage != GL_STATIC_DRAW_ARB)
		{
			nverts += nverts/4;
		}

		mNumVerts = nverts;
	}
}

void LLVertexBuffer::updateNumIndices(S32 nindices)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (!mDynamicSize)
	{
		mNumIndices = nindices;
	}
	else if (mUsage == GL_STATIC_DRAW_ARB ||
		nindices > mNumIndices ||
		nindices < mNumIndices/2)
	{
		if (mUsage != GL_STATIC_DRAW_ARB)
		{
			nindices += nindices/4;
		}

		mNumIndices = nindices;
	}
}
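
// Growth policy for dynamically sized (resizeBuffer'd) buffers: the stored
// counts behave like capacities.  A non-static buffer is only regrown when a
// request exceeds the current capacity or falls below half of it, and the new
// request is padded by 25% to absorb further growth.  For example, a first
// request of 100 verts allocates 125; later requests between 62 and 125 verts
// leave the allocation untouched.  Static buffers always take the exact count.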

void LLVertexBuffer::makeStatic()
{
	if (!sEnableVBOs)
	{
		return;
	}

	if (sRenderActive)
	{
		llerrs << "Make static called during render." << llendl;
	}

	if (mUsage != GL_STATIC_DRAW_ARB)
	{
		if (useVBOs())
		{
			if (mGLBuffer)
			{
				sDeleteList.push_back(mGLBuffer);
			}
			if (mGLIndices)
			{
				sDeleteList.push_back(mGLIndices);
			}
		}

		if (mGLBuffer)
		{
			sGLCount++;
			glGenBuffersARB(1, (GLuint*) &mGLBuffer);
		}
		if (mGLIndices)
		{
			sGLCount++;
			glGenBuffersARB(1, (GLuint*) &mGLIndices);
		}

		mUsage = GL_STATIC_DRAW_ARB;
		mResized = TRUE;

		if (!mLocked)
		{
			mLocked = TRUE;
			sLockedList.push_back(this);
		}
	}
}
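
// makeStatic() promotes a buffer to GL_STATIC_DRAW_ARB usage outside the
// render pass: the old GL names (when the buffer was really backed by VBOs)
// are queued for deferred deletion, fresh names are generated, and the buffer
// is marked resized and locked so the next clientCopy()/unmapBuffer() pass
// re-uploads the client data into the new static buffers.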

void LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);

	updateNumVerts(nverts);
	updateNumIndices(nindices);

	if (mMappedData)
	{
		llerrs << "LLVertexBuffer::allocateBuffer() called redundantly." << llendl;
	}
	if (create && (nverts || nindices))
	{
		createGLBuffer();
		createGLIndices();
	}

	sAllocatedBytes += getSize() + getIndicesSize();
}

void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	mDynamicSize = TRUE;
	if (mUsage == GL_STATIC_DRAW_ARB)
	{ //always delete/allocate static buffers on resize
		destroyGLBuffer();
		destroyGLIndices();
		allocateBuffer(newnverts, newnindices, TRUE);
		mFinal = FALSE;
	}
	else if (newnverts > mNumVerts || newnindices > mNumIndices ||
			 newnverts < mNumVerts/2 || newnindices < mNumIndices/2)
	{
		sAllocatedBytes -= getSize() + getIndicesSize();

		S32 oldsize = getSize();
		S32 old_index_size = getIndicesSize();

		updateNumVerts(newnverts);
		updateNumIndices(newnindices);

		S32 newsize = getSize();
		S32 new_index_size = getIndicesSize();

		sAllocatedBytes += newsize + new_index_size;

		if (newsize)
		{
			if (!mGLBuffer)
			{ //no buffer exists, create a new one
				createGLBuffer();
			}
			else
			{
				//delete old buffer, keep GL buffer for now
				U8* old = mMappedData;
				mMappedData = new U8[newsize];
				if (old)
				{
					memcpy(mMappedData, old, llmin(newsize, oldsize));
					if (newsize > oldsize)
					{
						memset(mMappedData+oldsize, 0, newsize-oldsize);
					}

					delete [] old;
				}
				else
				{
					memset(mMappedData, 0, newsize);
					mEmpty = TRUE;
				}
				mResized = TRUE;
			}
		}
		else if (mGLBuffer)
		{
			destroyGLBuffer();
		}

		if (new_index_size)
		{
			if (!mGLIndices)
			{
				createGLIndices();
			}
			else
			{
				//delete old buffer, keep GL buffer for now
				U8* old = mMappedIndexData;
				mMappedIndexData = new U8[new_index_size];
				if (old)
				{
					memcpy(mMappedIndexData, old, llmin(new_index_size, old_index_size));
					if (new_index_size > old_index_size)
					{
						memset(mMappedIndexData+old_index_size, 0, new_index_size - old_index_size);
					}
					delete [] old;
				}
				else
				{
					memset(mMappedIndexData, 0, new_index_size);
					mEmpty = TRUE;
				}
				mResized = TRUE;
			}
		}
		else if (mGLIndices)
		{
			destroyGLIndices();
		}
	}
}
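
// resizeBuffer() keeps the GL buffer object alive where it can: static
// buffers are simply destroyed and reallocated, while dynamic buffers only
// reallocate the client-memory copy (preserving the overlapping range with
// memcpy and zero-filling any growth) and set mResized, deferring the GL-side
// reallocation to the next unmapBuffer().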

BOOL LLVertexBuffer::useVBOs() const
{
	//it's generally ineffective to use VBO for things that are streaming
	//when we already have a client buffer around
	if (mUsage == GL_STREAM_DRAW_ARB)
	{
		return FALSE;
	}

	return sEnableVBOs && (!sRenderActive || !mLocked);
}

//----------------------------------------------------------------------------

// Map for data access
U8* LLVertexBuffer::mapBuffer(S32 access)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (sRenderActive)
	{
		llwarns << "Buffer mapped during render frame!" << llendl;
	}
	if (!mGLBuffer && !mGLIndices)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called before createGLBuffer" << llendl;
	}
	if (mFinal)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called on a finalized buffer." << llendl;
	}
	if (!mMappedData && !mMappedIndexData)
	{
		llerrs << "LLVertexBuffer::mapBuffer() called on unallocated buffer." << llendl;
	}

	if (!mLocked && useVBOs())
	{
		mLocked = TRUE;
		sLockedList.push_back(this);
	}

	return mMappedData;
}
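
// Note that mapBuffer() never calls glMapBufferARB: "mapping" here just locks
// the buffer onto sLockedList and returns the client-memory shadow copy, so
// all writes land in system memory and only reach the GL buffer when
// unmapBuffer() (directly, or via clientCopy()) uploads them.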

void LLVertexBuffer::unmapBuffer()
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (mMappedData || mMappedIndexData)
	{
		if (useVBOs() && mLocked)
		{
			if (mGLBuffer)
			{
				if (mResized)
				{
					glBufferDataARB(GL_ARRAY_BUFFER_ARB, getSize(), mMappedData, mUsage);
				}
				else
				{
					if (mEmpty || mDirtyRegions.empty())
					{
						glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
					}
					else
					{
						for (std::vector<DirtyRegion>::iterator i = mDirtyRegions.begin(); i != mDirtyRegions.end(); ++i)
						{
							DirtyRegion& region = *i;
							glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, region.mIndex*mStride, region.mCount*mStride, mMappedData + region.mIndex*mStride);
						}
					}
				}
			}

			if (mGLIndices)
			{
				if (mResized)
				{
					glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, getIndicesSize(), mMappedIndexData, mUsage);
				}
				else
				{
					if (mEmpty || mDirtyRegions.empty())
					{
						glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
					}
					else
					{
						for (std::vector<DirtyRegion>::iterator i = mDirtyRegions.begin(); i != mDirtyRegions.end(); ++i)
						{
							DirtyRegion& region = *i;
							glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, region.mIndicesIndex*sizeof(U32),
								region.mIndicesCount*sizeof(U32), mMappedIndexData + region.mIndicesIndex*sizeof(U32));
						}
					}
				}
			}

			mDirtyRegions.clear();
			mFilthy = FALSE;
			mResized = FALSE;

			if (mUsage == GL_STATIC_DRAW_ARB)
			{ //static draw buffers can only be mapped a single time
				//throw out client data (we won't be using it again)
				delete [] mMappedData;
				delete [] mMappedIndexData;
				mMappedIndexData = NULL;
				mMappedData = NULL;
				mEmpty = TRUE;
				mFinal = TRUE;
			}
			else
			{
				mEmpty = FALSE;
			}

			mLocked = FALSE;

			glFlush();
		}
	}
}
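
// Upload strategy in unmapBuffer(): a resized buffer gets a full
// glBufferDataARB reallocation; otherwise either the whole range or just the
// recorded dirty regions are pushed with glBufferSubDataARB.  Static-draw
// buffers then discard their client copy and are marked final, so they can be
// filled exactly once.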

//----------------------------------------------------------------------------

template <class T,S32 type> struct VertexBufferStrider
{
	typedef LLStrider<T> strider_t;
	static bool get(LLVertexBuffer& vbo,
					strider_t& strider,
					S32 index)
	{
		vbo.mapBuffer();
		if (type == LLVertexBuffer::TYPE_INDEX)
		{
			S32 stride = sizeof(T);
			strider = (T*)(vbo.getMappedIndices() + index*stride);
			strider.setStride(0);
			return TRUE;
		}
		else if (vbo.hasDataType(type))
		{
			S32 stride = vbo.getStride();
			strider = (T*)(vbo.getMappedData() + vbo.getOffset(type) + index*stride);
			strider.setStride(stride);
			return TRUE;
		}
		else
		{
			llerrs << "VertexBufferStrider could not find valid vertex data." << llendl;
		}
		return FALSE;
	}
};
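
// A strider resolves to (mapped base + attribute offset + index*stride) for
// interleaved vertex attributes, with the per-vertex stride set on the strider
// so that advancing it steps from one vertex to the next; index data is a flat
// array of U32 indices, so setStride(0) is used there, presumably meaning
// tightly packed elements.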


bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector3>& strider, S32 index)
{
	return VertexBufferStrider<LLVector3,TYPE_VERTEX>::get(*this, strider, index);
}
bool LLVertexBuffer::getIndexStrider(LLStrider<U32>& strider, S32 index)
{
	return VertexBufferStrider<U32,TYPE_INDEX>::get(*this, strider, index);
}
bool LLVertexBuffer::getTexCoordStrider(LLStrider<LLVector2>& strider, S32 index)
{
	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD>::get(*this, strider, index);
}
bool LLVertexBuffer::getTexCoord2Strider(LLStrider<LLVector2>& strider, S32 index)
{
	return VertexBufferStrider<LLVector2,TYPE_TEXCOORD2>::get(*this, strider, index);
}
bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector3>& strider, S32 index)
{
	return VertexBufferStrider<LLVector3,TYPE_NORMAL>::get(*this, strider, index);
}
bool LLVertexBuffer::getBinormalStrider(LLStrider<LLVector3>& strider, S32 index)
{
	return VertexBufferStrider<LLVector3,TYPE_BINORMAL>::get(*this, strider, index);
}
bool LLVertexBuffer::getColorStrider(LLStrider<LLColor4U>& strider, S32 index)
{
	return VertexBufferStrider<LLColor4U,TYPE_COLOR>::get(*this, strider, index);
}
bool LLVertexBuffer::getWeightStrider(LLStrider<F32>& strider, S32 index)
{
	return VertexBufferStrider<F32,TYPE_WEIGHT>::get(*this, strider, index);
}
bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index)
{
	return VertexBufferStrider<LLVector4,TYPE_CLOTHWEIGHT>::get(*this, strider, index);
}

void LLVertexBuffer::setStride(S32 type, S32 new_stride)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	if (mNumVerts)
	{
		llerrs << "LLVertexBuffer::setOffset called with mNumVerts = " << mNumVerts << llendl;
	}
	// This code assumes that setStride() will only be called once per VBO per type.
	S32 delta = new_stride - sTypeOffsets[type];
	for (S32 i=type+1; i<TYPE_MAX; i++)
	{
		if (mTypeMask & (1<<i))
		{
			mOffsets[i] += delta;
		}
	}
	mStride += delta;
}

//----------------------------------------------------------------------------

// Set for rendering
void LLVertexBuffer::setBuffer(U32 data_mask)
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	//set up pointers if the data mask is different ...
	BOOL setup = (sLastMask != data_mask);

	if (useVBOs())
	{
		if (mGLBuffer && (mGLBuffer != sGLRenderBuffer || !sVBOActive))
		{
			glBindBufferARB(GL_ARRAY_BUFFER_ARB, mGLBuffer);
			sVBOActive = TRUE;
			setup = TRUE; // ... or the bound buffer changed
		}
		if (mGLIndices && (mGLIndices != sGLRenderIndices || !sIBOActive))
		{
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, mGLIndices);
			sIBOActive = TRUE;
		}

		unmapBuffer();
	}
	else
	{
		if (mGLBuffer)
		{
			if (sEnableVBOs && sVBOActive)
			{
				glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
				sVBOActive = FALSE;
				setup = TRUE; // ... or a VBO is deactivated
			}
			if (sGLRenderBuffer != mGLBuffer)
			{
				setup = TRUE; // ... or a client memory pointer changed
			}
		}
		if (sEnableVBOs && mGLIndices && sIBOActive)
		{
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
			sIBOActive = FALSE;
		}
	}

	if (mGLIndices)
	{
		sGLRenderIndices = mGLIndices;
	}
	if (mGLBuffer)
	{
		sGLRenderBuffer = mGLBuffer;
		if (data_mask && setup)
		{
			if (!sRenderActive)
			{
				llwarns << "Vertex buffer set for rendering outside of render frame." << llendl;
			}
			setupVertexBuffer(data_mask); // subclass specific setup (virtual function)
			sLastMask = data_mask;
		}
	}
}
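
// setBuffer() is deliberately lazy: the VBO/IBO (or client arrays) are only
// rebound when the target buffer differs from the currently bound one, and the
// gl*Pointer setup in setupVertexBuffer() is only re-issued when the data mask
// or the bound buffer changed since the last call (tracked via sLastMask and
// sGLRenderBuffer/sGLRenderIndices).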

// virtual (default)
void LLVertexBuffer::setupVertexBuffer(U32 data_mask) const
{
	LLMemType mt(LLMemType::MTYPE_VERTEX_DATA);
	stop_glerror();
	U8* base = useVBOs() ? NULL : mMappedData;
	S32 stride = mStride;

	if ((data_mask & mTypeMask) != data_mask)
	{
		llerrs << "LLVertexBuffer::setupVertexBuffer missing required components for supplied data mask." << llendl;
	}

	if (data_mask & MAP_VERTEX)
	{
		glVertexPointer(3,GL_FLOAT, stride, (void*)(base + 0));
	}
	if (data_mask & MAP_NORMAL)
	{
		glNormalPointer(GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_NORMAL]));
	}
	if (data_mask & MAP_TEXCOORD2)
	{
		glClientActiveTextureARB(GL_TEXTURE1_ARB);
		glTexCoordPointer(2,GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_TEXCOORD2]));
	}
	if (data_mask & MAP_TEXCOORD)
	{
		glClientActiveTextureARB(GL_TEXTURE0_ARB);
		glTexCoordPointer(2,GL_FLOAT, stride, (void*)(base + mOffsets[TYPE_TEXCOORD]));
	}
	if (data_mask & MAP_COLOR)
	{
		glColorPointer(4, GL_UNSIGNED_BYTE, stride, (void*)(base + mOffsets[TYPE_COLOR]));
	}
	if (data_mask & MAP_BINORMAL)
	{
		glVertexAttribPointerARB(6, 3, GL_FLOAT, FALSE, stride, (void*)(base + mOffsets[TYPE_BINORMAL]));
	}
	if (data_mask & MAP_WEIGHT)
	{
		glVertexAttribPointerARB(1, 1, GL_FLOAT, FALSE, stride, (void*)(base + mOffsets[TYPE_WEIGHT]));
	}
	if (data_mask & MAP_CLOTHWEIGHT)
	{
		glVertexAttribPointerARB(4, 4, GL_FLOAT, TRUE, stride, (void*)(base + mOffsets[TYPE_CLOTHWEIGHT]));
	}

	llglassertok();
}

void LLVertexBuffer::markDirty(U32 vert_index, U32 vert_count, U32 indices_index, U32 indices_count)
{
	if (useVBOs() && !mFilthy)
	{
		if (!mDirtyRegions.empty())
		{
			DirtyRegion& region = *(mDirtyRegions.rbegin());

			if (region.mIndex+region.mCount > vert_index)
			{
				//this buffer has received multiple updates since the last copy, mark it filthy
				mFilthy = TRUE;
				mDirtyRegions.clear();
				return;
			}

			if (region.mIndex + region.mCount == vert_index &&
				region.mIndicesIndex + region.mIndicesCount == indices_index)
			{
				region.mCount += vert_count;
				region.mIndicesCount += indices_count;
				return;
			}
		}

		mDirtyRegions.push_back(DirtyRegion(vert_index,vert_count,indices_index,indices_count));
	}
}
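
// markDirty() records which sub-ranges changed so unmapBuffer() can upload
// only those regions.  A new range that begins exactly where the most recent
// region ends (for both vertices and indices) is merged into it; a range that
// starts before the end of the most recent region marks the buffer "filthy"
// and drops the region list, so the next unmap falls back to a full upload.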

void LLVertexBuffer::markClean()
{
	if (!mResized && !mEmpty && !mFilthy)
	{
		buffer_list_t::reverse_iterator iter = sLockedList.rbegin();
		if (*iter == this)
		{
			mLocked = FALSE;
			sLockedList.pop_back();
		}
	}
}
