/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "concurrent_vector_v2.h"
#include "tbb/tbb_machine.h"
#include "../tbb/itt_notify.h"
#include "tbb/task.h"

#include <stdexcept> // std::length_error
#include <cstring>

#if defined(_MSC_VER) && defined(_Wp64)
    // Workaround for overzealous compiler warnings in /Wp64 mode
    #pragma warning (disable: 4267)
#endif

namespace tbb {

namespace internal {

void concurrent_vector_base::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op1 init ) {
    size_type e = my_early_size;
    while( e<new_size ) {
        size_type f = my_early_size.compare_and_swap(new_size,e);
        if( f==e ) {
            internal_grow( e, new_size, element_size, init );
            return;
        }
        e = f;
    }
}
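
// Note on the loop above: compare_and_swap returns the value it observed in
// my_early_size. On success (f==e) this thread has claimed the half-open
// range [e, new_size) and initializes it via internal_grow; on failure the
// observed value f becomes the new candidate, and once f>=new_size another
// thread has already claimed the growth, so there is nothing left to do.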

class concurrent_vector_base::helper {
    static void extend_segment( concurrent_vector_base& v );
public:
    static segment_index_t find_segment_end( const concurrent_vector_base& v ) {
        const size_t pointers_per_long_segment = sizeof(void*)==4 ? 32 : 64;
        const size_t pointers_per_short_segment = 2;
        segment_index_t u = v.my_segment==(&(v.my_storage[0])) ? pointers_per_short_segment : pointers_per_long_segment;
        segment_index_t k = 0;
        while( k<u && v.my_segment[k].array )
            ++k;
        return k;
    }
    static void extend_segment_if_necessary( concurrent_vector_base& v, size_t k ) {
        const size_t pointers_per_short_segment = 2;
        if( k>=pointers_per_short_segment && v.my_segment==v.my_storage ) {
            extend_segment(v);
        }
    }
};
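
// Segment math, as declared in concurrent_vector_v2.h: segment 0 holds 2
// elements and segment k (k>=1) holds 2^k, so 32 segment pointers suffice on
// a 32-bit build and 64 on a 64-bit build. The embedded two-entry my_storage
// table covers segments 0 and 1 (the first four elements); past that,
// extend_segment swaps in the long table. Worked example, assuming the usual
// TBB helpers: for index 5, segment_index_of(5)==2 and segment_base(2)==4,
// so index 5 occupies slot 1 of segment 2.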

void concurrent_vector_base::helper::extend_segment( concurrent_vector_base& v ) {
    const size_t pointers_per_long_segment = sizeof(void*)==4 ? 32 : 64;
    segment_t* s = (segment_t*)NFS_Allocate( pointers_per_long_segment, sizeof(segment_t), NULL );
    std::memset( static_cast<void*>(s), 0, pointers_per_long_segment*sizeof(segment_t) );
    // If other threads are trying to set pointers in the short segment, wait for them to finish their
    // assignments before we copy the short segment to the long segment.
    atomic_backoff backoff;
    while( !v.my_storage[0].array || !v.my_storage[1].array ) backoff.pause();
    s[0] = v.my_storage[0];
    s[1] = v.my_storage[1];
    if( v.my_segment.compare_and_swap( s, v.my_storage )!=v.my_storage )
        NFS_Free(s);
}
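
// The compare_and_swap above makes publication of the long table race-safe:
// exactly one thread installs its allocation into v.my_segment, and any
// thread that loses the race frees its own copy. The embedded my_storage
// table is a member array, so it is never freed, merely bypassed once
// my_segment points at the long table.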

concurrent_vector_base::size_type concurrent_vector_base::internal_capacity() const {
    return segment_base( helper::find_segment_end(*this) );
}

void concurrent_vector_base::internal_reserve( size_type n, size_type element_size, size_type max_size ) {
    if( n>max_size ) {
        __TBB_THROW( std::length_error("argument to concurrent_vector::reserve exceeds concurrent_vector::max_size()") );
    }
    for( segment_index_t k = helper::find_segment_end(*this); segment_base(k)<n; ++k ) {
        helper::extend_segment_if_necessary(*this,k);
        size_t m = segment_size(k);
        __TBB_ASSERT( !my_segment[k].array, "concurrent operation during reserve(...)?" );
        my_segment[k].array = NFS_Allocate( m, element_size, NULL );
    }
}
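
// Illustrative (hypothetical) use from a typed wrapper; a public reserve()
// would be expected to forward here after supplying sizeof(T) and max_size():
//
//     void concurrent_vector<T>::reserve( size_type n ) {
//         if( n )
//             internal_reserve( n, sizeof(T), max_size() );
//     }
//
// Note that reserve is not itself thread-safe: the assertion above fires if
// another thread allocates a segment concurrently.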

void concurrent_vector_base::internal_copy( const concurrent_vector_base& src, size_type element_size, internal_array_op2 copy ) {
    size_type n = src.my_early_size;
    my_early_size = n;
    my_segment = my_storage;
    if( n ) {
        size_type b;
        for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
            helper::extend_segment_if_necessary(*this,k);
            size_t m = segment_size(k);
            __TBB_ASSERT( !my_segment[k].array, "concurrent operation during copy construction?" );
            my_segment[k].array = NFS_Allocate( m, element_size, NULL );
            if( m>n-b ) m = n-b;
            copy( my_segment[k].array, src.my_segment[k].array, m );
        }
    }
}
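
// internal_copy assumes *this was freshly constructed (all segment pointers
// NULL): each needed segment is allocated at full size, but only the first
// min(segment_size(k), n-b) elements are copy-constructed, so the tail of
// the last segment remains raw storage.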

void concurrent_vector_base::internal_assign( const concurrent_vector_base& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) {
    size_type n = src.my_early_size;
    while( my_early_size>n ) {
        segment_index_t k = segment_index_of( my_early_size-1 );
        size_type b=segment_base(k);
        size_type new_end = b>=n ? b : n;
        __TBB_ASSERT( my_early_size>new_end, NULL );
        destroy( (char*)my_segment[k].array+element_size*(new_end-b), my_early_size-new_end );
        my_early_size = new_end;
    }
    size_type dst_initialized_size = my_early_size;
    my_early_size = n;
    size_type b;
    for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
        helper::extend_segment_if_necessary(*this,k);
        size_t m = segment_size(k);
        if( !my_segment[k].array )
            my_segment[k].array = NFS_Allocate( m, element_size, NULL );
        if( m>n-b ) m = n-b;
        size_type a = 0;
        if( dst_initialized_size>b ) {
            a = dst_initialized_size-b;
            if( a>m ) a = m;
            assign( my_segment[k].array, src.my_segment[k].array, a );
            m -= a;
            a *= element_size;
        }
        if( m>0 )
            copy( (char*)my_segment[k].array+a, (char*)src.my_segment[k].array+a, m );
    }
    __TBB_ASSERT( src.my_early_size==n, "detected use of concurrent_vector::operator= with right side that was concurrently modified" );
}
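
// internal_assign runs in three phases:
//   1. The while loop destroys destination elements beyond the source size,
//      working downward one segment at a time.
//   2. For indices initialized on both sides, the `assign` callback performs
//      element-wise assignment.
//   3. For indices present only in the source, the `copy` callback
//      copy-constructs into raw (possibly freshly allocated) storage.
// Worked example: assigning a 10-element source to a 4-element destination
// assigns elements 0..3 and copy-constructs elements 4..9.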

void* concurrent_vector_base::internal_push_back( size_type element_size, size_type& index ) {
    __TBB_ASSERT( sizeof(my_early_size)==sizeof(reference_count), NULL );
    size_t tmp = __TBB_FetchAndIncrementWacquire((tbb::internal::reference_count*)&my_early_size);
    index = tmp;
    segment_index_t k_old = segment_index_of( tmp );
    size_type base = segment_base(k_old);
    helper::extend_segment_if_necessary(*this,k_old);
    segment_t& s = my_segment[k_old];
    void* array = s.array;
    if( !array ) {
        // FIXME - consider factoring this out and sharing it with internal_grow, which repeats this allocate-or-wait logic
        if( base==tmp ) {
            __TBB_ASSERT( !s.array, NULL );
            size_t n = segment_size(k_old);
            array = NFS_Allocate( n, element_size, NULL );
            ITT_NOTIFY( sync_releasing, &s.array );
            s.array = array;
        } else {
            ITT_NOTIFY(sync_prepare, &s.array);
            spin_wait_while_eq( s.array, (void*)0 );
            ITT_NOTIFY(sync_acquired, &s.array);
            array = s.array;
        }
    }
    size_type j_begin = tmp-base;
    return (void*)((char*)array+element_size*j_begin);
}
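
// The push_back protocol: claim a unique index with the fetch-and-increment,
// then either allocate the segment (the thread that claims a segment's base
// index owns the allocation) or spin until the owner publishes it. A sketch
// of how a typed layer might consume the returned slot (hypothetical, for
// illustration only):
//
//     size_type k;
//     void* slot = internal_push_back( sizeof(T), k );
//     new( slot ) T( value );   // placement-construct into the raw slot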

concurrent_vector_base::size_type concurrent_vector_base::internal_grow_by( size_type delta, size_type element_size, internal_array_op1 init ) {
    size_type result = my_early_size.fetch_and_add(delta);
    internal_grow( result, result+delta, element_size, init );
    return result;
}
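
// fetch_and_add returns the pre-increment size, so `result` is the index of
// the first element added; the caller's new elements occupy the half-open
// range [result, result+delta).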

void concurrent_vector_base::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op1 init ) {
    __TBB_ASSERT( start<finish, "start must be less than finish" );
    size_t tmp = start;
    do {
        segment_index_t k_old = segment_index_of( tmp );
        size_type base = segment_base(k_old);
        size_t n = segment_size(k_old);
        helper::extend_segment_if_necessary(*this,k_old);
        segment_t& s = my_segment[k_old];
        void* array = s.array;
        if( !array ) {
            if( base==tmp ) {
                __TBB_ASSERT( !s.array, NULL );
                array = NFS_Allocate( n, element_size, NULL );
                ITT_NOTIFY( sync_releasing, &s.array );
                s.array = array;
            } else {
                ITT_NOTIFY(sync_prepare, &s.array);
                spin_wait_while_eq( s.array, (void*)0 );
                ITT_NOTIFY(sync_acquired, &s.array);
                array = s.array;
            }
        }
        size_type j_begin = tmp-base;
        size_type j_end = n > finish-base ? finish-base : n;
        (*init)( (void*)((char*)array+element_size*j_begin), j_end-j_begin );
        tmp = base+j_end;
    } while( tmp<finish );
}
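
// Segment allocation here mirrors internal_push_back: the thread whose range
// begins at a segment's base index allocates and publishes the segment, and
// all other threads spin until the pointer becomes non-NULL, then construct
// only their own sub-range [j_begin, j_end). The ITT_NOTIFY calls annotate
// that publish/consume handshake for analysis tools.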

void concurrent_vector_base::internal_clear( internal_array_op1 destroy, bool reclaim_storage ) {
    // Set "my_early_size" early, so that subscripting errors can be caught.
    // FIXME - doing so may be hurting exception safety
    __TBB_ASSERT( my_segment, NULL );
    size_type finish = my_early_size;
    my_early_size = 0;
    while( finish>0 ) {
        segment_index_t k_old = segment_index_of(finish-1);
        segment_t& s = my_segment[k_old];
        __TBB_ASSERT( s.array, NULL );
        size_type base = segment_base(k_old);
        size_type j_end = finish-base;
        __TBB_ASSERT( j_end, NULL );
        (*destroy)( s.array, j_end );
        finish = base;
    }

    // Free the arrays
    if( reclaim_storage ) {
        size_t k = helper::find_segment_end(*this);
        while( k>0 ) {
            --k;
            segment_t& s = my_segment[k];
            void* array = s.array;
            s.array = NULL;
            NFS_Free( array );
        }
        // Clear short segment.
        my_storage[0].array = NULL;
        my_storage[1].array = NULL;
        segment_t* s = my_segment;
        if( s!=my_storage ) {
            my_segment = my_storage;
            NFS_Free( s );
        }
    }
}
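
// Elements are destroyed from the highest segment downward, a whole segment
// at a time; the arrays themselves are released only when reclaim_storage is
// true, so a clear() that intends to reuse capacity can skip reclamation.
// A sketch of the two call sites one would expect in a typed layer
// (hypothetical helper name destroy_array):
//
//     internal_clear( &destroy_array, /*reclaim_storage=*/false ); // clear()
//     internal_clear( &destroy_array, /*reclaim_storage=*/true  ); // destructor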

} // namespace internal

} // namespace tbb