/*
 * This file Copyright (C) Mnemosyne LLC
 *
 * This file is licensed by the GPL version 2. Works owned by the
 * Transmission project are granted a special exemption to clause 2(b)
 * so that the bulk of its code can remain under the MIT license.
 * This exemption does not extend to derived works not owned by
 * the Transmission project.
 *
 * $Id: bandwidth.c 12420 2011-05-09 04:16:49Z jordan $
 */
---|
12 | |
---|
13 | #include <assert.h> |
---|
14 | #include <limits.h> |
---|
15 | #include <string.h> /* memset() */ |
---|
16 | |
---|
17 | #include "transmission.h" |
---|
18 | #include "bandwidth.h" |
---|
19 | #include "crypto.h" /* tr_cryptoWeakRandInt() */ |
---|
20 | #include "peer-io.h" |
---|
21 | #include "utils.h" |
---|
22 | |
---|
/* Emit a deep-trace log line tagged with this file and line number.
 * Cheap when tracing is off: only the tr_deepLoggingIsActive() test runs. */
#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
    } while( 0 )
---|
28 | |
---|
29 | /*** |
---|
30 | **** |
---|
31 | ***/ |
---|
32 | |
---|
33 | static unsigned int |
---|
34 | getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now ) |
---|
35 | { |
---|
36 | if( !now ) |
---|
37 | now = tr_time_msec(); |
---|
38 | |
---|
39 | if( now != r->cache_time ) |
---|
40 | { |
---|
41 | int i = r->newest; |
---|
42 | uint64_t bytes = 0; |
---|
43 | const uint64_t cutoff = now - interval_msec; |
---|
44 | struct bratecontrol * rvolatile = (struct bratecontrol*) r; |
---|
45 | |
---|
46 | for( ;; ) |
---|
47 | { |
---|
48 | if( r->transfers[i].date <= cutoff ) |
---|
49 | break; |
---|
50 | |
---|
51 | bytes += r->transfers[i].size; |
---|
52 | |
---|
53 | if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */ |
---|
54 | if( i == r->newest ) break; /* we've come all the way around */ |
---|
55 | } |
---|
56 | |
---|
57 | rvolatile->cache_val = (unsigned int)(( bytes * 1000u ) / interval_msec); |
---|
58 | rvolatile->cache_time = now; |
---|
59 | } |
---|
60 | |
---|
61 | return r->cache_val; |
---|
62 | } |
---|
63 | |
---|
64 | static void |
---|
65 | bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size ) |
---|
66 | { |
---|
67 | if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now ) |
---|
68 | r->transfers[r->newest].size += size; |
---|
69 | else |
---|
70 | { |
---|
71 | if( ++r->newest == HISTORY_SIZE ) r->newest = 0; |
---|
72 | r->transfers[r->newest].date = now; |
---|
73 | r->transfers[r->newest].size = size; |
---|
74 | } |
---|
75 | |
---|
76 | /* invalidate cache_val*/ |
---|
77 | r->cache_time = 0; |
---|
78 | } |
---|
79 | |
---|
80 | /****** |
---|
81 | ******* |
---|
82 | ******* |
---|
83 | ******/ |
---|
84 | |
---|
/* qsort/bsearch-style comparator ordering raw pointer values:
 * returns -1, 0, or 1 as `a` is below, equal to, or above `b`. */
static inline int
comparePointers( const void * a, const void * b )
{
    if( a == b )
        return 0;

    return ( a < b ) ? -1 : 1;
}
---|
93 | |
---|
94 | /*** |
---|
95 | **** |
---|
96 | ***/ |
---|
97 | |
---|
98 | void |
---|
99 | tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent ) |
---|
100 | { |
---|
101 | b->session = session; |
---|
102 | b->children = TR_PTR_ARRAY_INIT; |
---|
103 | b->magicNumber = BANDWIDTH_MAGIC_NUMBER; |
---|
104 | b->band[TR_UP].honorParentLimits = true; |
---|
105 | b->band[TR_DOWN].honorParentLimits = true; |
---|
106 | tr_bandwidthSetParent( b, parent ); |
---|
107 | } |
---|
108 | |
---|
109 | void |
---|
110 | tr_bandwidthDestruct( tr_bandwidth * b ) |
---|
111 | { |
---|
112 | assert( tr_isBandwidth( b ) ); |
---|
113 | |
---|
114 | tr_bandwidthSetParent( b, NULL ); |
---|
115 | tr_ptrArrayDestruct( &b->children, NULL ); |
---|
116 | |
---|
117 | memset( b, ~0, sizeof( tr_bandwidth ) ); |
---|
118 | } |
---|
119 | |
---|
120 | /*** |
---|
121 | **** |
---|
122 | ***/ |
---|
123 | |
---|
124 | void |
---|
125 | tr_bandwidthSetParent( tr_bandwidth * b, |
---|
126 | tr_bandwidth * parent ) |
---|
127 | { |
---|
128 | assert( tr_isBandwidth( b ) ); |
---|
129 | assert( b != parent ); |
---|
130 | |
---|
131 | if( b->parent ) |
---|
132 | { |
---|
133 | void * removed; |
---|
134 | |
---|
135 | assert( tr_isBandwidth( b->parent ) ); |
---|
136 | |
---|
137 | removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, comparePointers ); |
---|
138 | assert( removed == b ); |
---|
139 | assert( tr_ptrArrayFindSorted( &b->parent->children, b, comparePointers ) == NULL ); |
---|
140 | |
---|
141 | b->parent = NULL; |
---|
142 | } |
---|
143 | |
---|
144 | if( parent ) |
---|
145 | { |
---|
146 | assert( tr_isBandwidth( parent ) ); |
---|
147 | assert( parent->parent != b ); |
---|
148 | |
---|
149 | tr_ptrArrayInsertSorted( &parent->children, b, comparePointers ); |
---|
150 | assert( tr_ptrArrayFindSorted( &parent->children, b, comparePointers ) == b ); |
---|
151 | b->parent = parent; |
---|
152 | } |
---|
153 | } |
---|
154 | |
---|
155 | /*** |
---|
156 | **** |
---|
157 | ***/ |
---|
158 | #if 0 |
---|
159 | #warning do not check the code in with this enabled |
---|
160 | #define DEBUG_DIRECTION TR_UP |
---|
161 | #endif |
---|
162 | |
---|
163 | static void |
---|
164 | allocateBandwidth( tr_bandwidth * b, |
---|
165 | tr_priority_t parent_priority, |
---|
166 | tr_direction dir, |
---|
167 | unsigned int period_msec, |
---|
168 | tr_ptrArray * peer_pool ) |
---|
169 | { |
---|
170 | tr_priority_t priority; |
---|
171 | |
---|
172 | assert( tr_isBandwidth( b ) ); |
---|
173 | assert( tr_isDirection( dir ) ); |
---|
174 | |
---|
175 | /* set the available bandwidth */ |
---|
176 | if( b->band[dir].isLimited ) |
---|
177 | { |
---|
178 | const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps; |
---|
179 | b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u; |
---|
180 | |
---|
181 | #ifdef DEBUG_DIRECTION |
---|
182 | if( dir == DEBUG_DIRECTION ) |
---|
183 | fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %d\n", |
---|
184 | b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed, |
---|
185 | b->band[dir].bytesLeft ); |
---|
186 | #endif |
---|
187 | } |
---|
188 | |
---|
189 | priority = MAX( parent_priority, b->priority ); |
---|
190 | |
---|
191 | /* add this bandwidth's peer, if any, to the peer pool */ |
---|
192 | if( b->peer != NULL ) { |
---|
193 | b->peer->priority = priority; |
---|
194 | tr_ptrArrayAppend( peer_pool, b->peer ); |
---|
195 | } |
---|
196 | |
---|
197 | #ifdef DEBUG_DIRECTION |
---|
198 | if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) ) |
---|
199 | fprintf( stderr, "bandwidth %p has %d peers\n", b, n ); |
---|
200 | #endif |
---|
201 | |
---|
202 | /* traverse & repeat for the subtree */ |
---|
203 | if( 1 ) { |
---|
204 | int i; |
---|
205 | struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children ); |
---|
206 | const int n = tr_ptrArraySize( &b->children ); |
---|
207 | for( i=0; i<n; ++i ) |
---|
208 | allocateBandwidth( children[i], priority, dir, period_msec, peer_pool ); |
---|
209 | } |
---|
210 | } |
---|
211 | |
---|
212 | static void |
---|
213 | phaseOne( tr_ptrArray * peerArray, tr_direction dir ) |
---|
214 | { |
---|
215 | int i, n; |
---|
216 | int peerCount = tr_ptrArraySize( peerArray ); |
---|
217 | struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray ); |
---|
218 | |
---|
219 | /* First phase of IO. Tries to distribute bandwidth fairly to keep faster |
---|
220 | * peers from starving the others. Loop through the peers, giving each a |
---|
221 | * small chunk of bandwidth. Keep looping until we run out of bandwidth |
---|
222 | * and/or peers that can use it */ |
---|
223 | n = peerCount; |
---|
224 | dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") ); |
---|
225 | i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */ |
---|
226 | while( n > 0 ) |
---|
227 | { |
---|
228 | /* value of 3000 bytes chosen so that when using uTP we'll send a full-size |
---|
229 | * frame right away and leave enough buffered data for the next frame to go |
---|
230 | * out in a timely manner. */ |
---|
231 | const size_t increment = 3000; |
---|
232 | |
---|
233 | const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment ); |
---|
234 | |
---|
235 | dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed ); |
---|
236 | |
---|
237 | if( bytesUsed == (int)increment ) |
---|
238 | ++i; |
---|
239 | else { |
---|
240 | /* peer is done writing for now; move it to the end of the list */ |
---|
241 | tr_peerIo * pio = peers[i]; |
---|
242 | peers[i] = peers[n-1]; |
---|
243 | peers[n-1] = pio; |
---|
244 | --n; |
---|
245 | } |
---|
246 | |
---|
247 | if( i >= n ) |
---|
248 | i = 0; |
---|
249 | } |
---|
250 | } |
---|
251 | |
---|
252 | void |
---|
253 | tr_bandwidthAllocate( tr_bandwidth * b, |
---|
254 | tr_direction dir, |
---|
255 | unsigned int period_msec ) |
---|
256 | { |
---|
257 | int i, peerCount; |
---|
258 | tr_ptrArray tmp = TR_PTR_ARRAY_INIT; |
---|
259 | tr_ptrArray low = TR_PTR_ARRAY_INIT; |
---|
260 | tr_ptrArray high = TR_PTR_ARRAY_INIT; |
---|
261 | tr_ptrArray normal = TR_PTR_ARRAY_INIT; |
---|
262 | struct tr_peerIo ** peers; |
---|
263 | |
---|
264 | /* allocateBandwidth() is a helper function with two purposes: |
---|
265 | * 1. allocate bandwidth to b and its subtree |
---|
266 | * 2. accumulate an array of all the peerIos from b and its subtree. */ |
---|
267 | allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp ); |
---|
268 | peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp ); |
---|
269 | peerCount = tr_ptrArraySize( &tmp ); |
---|
270 | |
---|
271 | for( i=0; i<peerCount; ++i ) |
---|
272 | { |
---|
273 | tr_peerIo * io = peers[i]; |
---|
274 | tr_peerIoRef( io ); |
---|
275 | |
---|
276 | tr_peerIoFlushOutgoingProtocolMsgs( io ); |
---|
277 | |
---|
278 | switch( io->priority ) { |
---|
279 | case TR_PRI_HIGH: tr_ptrArrayAppend( &high, io ); /* fall through */ |
---|
280 | case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */ |
---|
281 | default: tr_ptrArrayAppend( &low, io ); |
---|
282 | } |
---|
283 | } |
---|
284 | |
---|
285 | /* First phase of IO. Tries to distribute bandwidth fairly to keep faster |
---|
286 | * peers from starving the others. Loop through the peers, giving each a |
---|
287 | * small chunk of bandwidth. Keep looping until we run out of bandwidth |
---|
288 | * and/or peers that can use it */ |
---|
289 | phaseOne( &high, dir ); |
---|
290 | phaseOne( &normal, dir ); |
---|
291 | phaseOne( &low, dir ); |
---|
292 | |
---|
293 | /* Second phase of IO. To help us scale in high bandwidth situations, |
---|
294 | * enable on-demand IO for peers with bandwidth left to burn. |
---|
295 | * This on-demand IO is enabled until (1) the peer runs out of bandwidth, |
---|
296 | * or (2) the next tr_bandwidthAllocate() call, when we start over again. */ |
---|
297 | for( i=0; i<peerCount; ++i ) |
---|
298 | tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) ); |
---|
299 | |
---|
300 | for( i=0; i<peerCount; ++i ) |
---|
301 | tr_peerIoUnref( peers[i] ); |
---|
302 | |
---|
303 | /* cleanup */ |
---|
304 | tr_ptrArrayDestruct( &normal, NULL ); |
---|
305 | tr_ptrArrayDestruct( &high, NULL ); |
---|
306 | tr_ptrArrayDestruct( &low, NULL ); |
---|
307 | tr_ptrArrayDestruct( &tmp, NULL ); |
---|
308 | } |
---|
309 | |
---|
310 | void |
---|
311 | tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer ) |
---|
312 | { |
---|
313 | assert( tr_isBandwidth( b ) ); |
---|
314 | assert( ( peer == NULL ) || tr_isPeerIo( peer ) ); |
---|
315 | |
---|
316 | b->peer = peer; |
---|
317 | } |
---|
318 | |
---|
319 | /*** |
---|
320 | **** |
---|
321 | ***/ |
---|
322 | |
---|
323 | static unsigned int |
---|
324 | bandwidthClamp( const tr_bandwidth * b, |
---|
325 | uint64_t now, |
---|
326 | tr_direction dir, |
---|
327 | unsigned int byteCount ) |
---|
328 | { |
---|
329 | assert( tr_isBandwidth( b ) ); |
---|
330 | assert( tr_isDirection( dir ) ); |
---|
331 | |
---|
332 | if( b ) |
---|
333 | { |
---|
334 | if( b->band[dir].isLimited ) |
---|
335 | { |
---|
336 | byteCount = MIN( byteCount, b->band[dir].bytesLeft ); |
---|
337 | |
---|
338 | /* if we're getting close to exceeding the speed limit, |
---|
339 | * clamp down harder on the bytes available */ |
---|
340 | if( byteCount > 0 ) |
---|
341 | { |
---|
342 | double current; |
---|
343 | double desired; |
---|
344 | double r; |
---|
345 | |
---|
346 | if( now == 0 ) |
---|
347 | now = tr_time_msec( ); |
---|
348 | |
---|
349 | current = tr_bandwidthGetRawSpeed_Bps( b, now, TR_DOWN ); |
---|
350 | desired = tr_bandwidthGetDesiredSpeed_Bps( b, TR_DOWN ); |
---|
351 | r = desired >= 1 ? current / desired : 0; |
---|
352 | |
---|
353 | if( r > 1.0 ) byteCount = 0; |
---|
354 | else if( r > 0.9 ) byteCount *= 0.8; |
---|
355 | else if( r > 0.8 ) byteCount *= 0.9; |
---|
356 | } |
---|
357 | } |
---|
358 | |
---|
359 | if( b->parent && b->band[dir].honorParentLimits && ( byteCount > 0 ) ) |
---|
360 | byteCount = bandwidthClamp( b->parent, now, dir, byteCount ); |
---|
361 | } |
---|
362 | |
---|
363 | return byteCount; |
---|
364 | } |
---|
365 | unsigned int |
---|
366 | tr_bandwidthClamp( const tr_bandwidth * b, |
---|
367 | tr_direction dir, |
---|
368 | unsigned int byteCount ) |
---|
369 | { |
---|
370 | return bandwidthClamp( b, 0, dir, byteCount ); |
---|
371 | } |
---|
372 | |
---|
373 | |
---|
374 | unsigned int |
---|
375 | tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir ) |
---|
376 | { |
---|
377 | assert( tr_isBandwidth( b ) ); |
---|
378 | assert( tr_isDirection( dir ) ); |
---|
379 | |
---|
380 | return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now ); |
---|
381 | } |
---|
382 | |
---|
383 | unsigned int |
---|
384 | tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir ) |
---|
385 | { |
---|
386 | assert( tr_isBandwidth( b ) ); |
---|
387 | assert( tr_isDirection( dir ) ); |
---|
388 | |
---|
389 | return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now ); |
---|
390 | } |
---|
391 | |
---|
392 | void |
---|
393 | tr_bandwidthUsed( tr_bandwidth * b, |
---|
394 | tr_direction dir, |
---|
395 | size_t byteCount, |
---|
396 | bool isPieceData, |
---|
397 | uint64_t now ) |
---|
398 | { |
---|
399 | struct tr_band * band; |
---|
400 | |
---|
401 | assert( tr_isBandwidth( b ) ); |
---|
402 | assert( tr_isDirection( dir ) ); |
---|
403 | |
---|
404 | band = &b->band[dir]; |
---|
405 | |
---|
406 | if( band->isLimited && isPieceData ) |
---|
407 | band->bytesLeft -= MIN( band->bytesLeft, byteCount ); |
---|
408 | |
---|
409 | #ifdef DEBUG_DIRECTION |
---|
410 | if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) ) |
---|
411 | fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n", |
---|
412 | b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft ); |
---|
413 | #endif |
---|
414 | |
---|
415 | bytesUsed( now, &band->raw, byteCount ); |
---|
416 | |
---|
417 | if( isPieceData ) |
---|
418 | bytesUsed( now, &band->piece, byteCount ); |
---|
419 | |
---|
420 | if( b->parent != NULL ) |
---|
421 | tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now ); |
---|
422 | } |
---|