source: trunk/libtransmission/bandwidth.c @ 12545

Last change on this file since 12545 was 12509, checked in by jordan, 10 years ago

(trunk libT) add a unique key to each tr_bandwidth object, so that when sorting them arbitrarily we can use that key rather than their pointer address. Apparently comparing pointers that aren't allocated in the same array is undefined behavior.

  • Property svn:keywords set to Date Rev Author Id
File size: 12.0 KB
Line 
1/*
2 * This file Copyright (C) Mnemosyne LLC
3 *
4 * This file is licensed by the GPL version 2. Works owned by the
5 * Transmission project are granted a special exemption to clause 2(b)
6 * so that the bulk of its code can remain under the MIT license.
7 * This exemption does not extend to derived works not owned by
8 * the Transmission project.
9 *
10 * $Id: bandwidth.c 12509 2011-06-19 18:34:10Z jordan $
11 */
12
13#include <assert.h>
14#include <limits.h>
15#include <string.h> /* memset() */
16
17#include "transmission.h"
18#include "bandwidth.h"
19#include "crypto.h" /* tr_cryptoWeakRandInt() */
20#include "peer-io.h"
21#include "utils.h"
22
23#define dbgmsg( ... ) \
24    do { \
25        if( tr_deepLoggingIsActive( ) ) \
26            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
27    } while( 0 )
28
29/***
30****
31***/
32
33static unsigned int
34getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now )
35{
36    if( !now )
37        now = tr_time_msec();
38
39    if( now != r->cache_time )
40    {
41        int i = r->newest;
42        uint64_t bytes = 0;
43        const uint64_t cutoff = now - interval_msec;
44        struct bratecontrol * rvolatile = (struct bratecontrol*) r;
45
46        for( ;; )
47        {
48            if( r->transfers[i].date <= cutoff )
49                break;
50
51            bytes += r->transfers[i].size;
52
53            if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */
54            if( i == r->newest ) break; /* we've come all the way around */
55        }
56
57        rvolatile->cache_val = (unsigned int)(( bytes * 1000u ) / interval_msec);
58        rvolatile->cache_time = now;
59    }
60
61    return r->cache_val;
62}
63
64static void
65bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size )
66{
67    if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now )
68        r->transfers[r->newest].size += size;
69    else
70    {
71        if( ++r->newest == HISTORY_SIZE ) r->newest = 0;
72        r->transfers[r->newest].date = now;
73        r->transfers[r->newest].size = size;
74    }
75
76    /* invalidate cache_val*/
77    r->cache_time = 0;
78}
79
80/******
81*******
82*******
83******/
84
85static int
86compareBandwidth( const void * va, const void * vb )
87{
88    const tr_bandwidth * a = va;
89    const tr_bandwidth * b = vb;
90    return a->uniqueKey - b->uniqueKey;
91}
92
93/***
94****
95***/
96
97void
98tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent )
99{
100    static unsigned int uniqueKey = 0;
101
102    b->session = session;
103    b->children = TR_PTR_ARRAY_INIT;
104    b->magicNumber = BANDWIDTH_MAGIC_NUMBER;
105    b->uniqueKey = uniqueKey++;
106    b->band[TR_UP].honorParentLimits = true;
107    b->band[TR_DOWN].honorParentLimits = true;
108    tr_bandwidthSetParent( b, parent );
109}
110
/**
 * Tear down "b": detach it from its parent's children list and release
 * the children array (the children themselves are not freed here --
 * they're owned elsewhere). The struct's memory is not freed either;
 * the caller owns it.
 */
void
tr_bandwidthDestruct( tr_bandwidth * b )
{
    assert( tr_isBandwidth( b ) );

    tr_bandwidthSetParent( b, NULL );
    tr_ptrArrayDestruct( &b->children, NULL );

    /* scribble over the struct (clobbering magicNumber) so that any
     * use-after-destruct trips the tr_isBandwidth() assertions */
    memset( b, ~0, sizeof( tr_bandwidth ) );
}
121
122/***
123****
124***/
125
/**
 * Re-parent "b" within the bandwidth hierarchy: detach it from its
 * current parent (if any), then insert it into "parent"'s sorted
 * children list. Passing NULL for "parent" just detaches "b".
 * The children lists are kept sorted by compareBandwidth() so that
 * removal/lookup can use binary search.
 */
void
tr_bandwidthSetParent( tr_bandwidth  * b,
                       tr_bandwidth  * parent )
{
    assert( tr_isBandwidth( b ) );
    assert( b != parent ); /* no self-parenting */

    if( b->parent )
    {
        void * removed;

        assert( tr_isBandwidth( b->parent ) );

        removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, compareBandwidth );
        assert( removed == b );
        assert( tr_ptrArrayFindSorted( &b->parent->children, b, compareBandwidth ) == NULL );

        b->parent = NULL;
    }

    if( parent )
    {
        assert( tr_isBandwidth( parent ) );
        assert( parent->parent != b ); /* no two-node cycles; deeper cycles aren't checked here */

        /* b must not already be in the list; insert keeps the list sorted */
        assert( tr_ptrArrayFindSorted( &parent->children, b, compareBandwidth ) == NULL );
        tr_ptrArrayInsertSorted( &parent->children, b, compareBandwidth );
        assert( tr_ptrArrayFindSorted( &parent->children, b, compareBandwidth ) == b );
        b->parent = parent;
    }
}
157
158/***
159****
160***/
161#if 0
162#warning do not check the code in with this enabled
163#define DEBUG_DIRECTION TR_UP
164#endif
165
166static void
167allocateBandwidth( tr_bandwidth  * b,
168                   tr_priority_t   parent_priority,
169                   tr_direction    dir,
170                   unsigned int    period_msec,
171                   tr_ptrArray   * peer_pool )
172{
173    const tr_priority_t priority = MAX( parent_priority, b->priority );
174
175    assert( tr_isBandwidth( b ) );
176    assert( tr_isDirection( dir ) );
177
178    /* set the available bandwidth */
179    if( b->band[dir].isLimited )
180    {
181        const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
182        b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u;
183
184#ifdef DEBUG_DIRECTION
185        if( dir == DEBUG_DIRECTION )
186                fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %d\n",
187                         b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
188                         b->band[dir].bytesLeft );
189#endif
190    }
191
192    /* add this bandwidth's peer, if any, to the peer pool */
193    if( b->peer != NULL ) {
194        b->peer->priority = priority;
195        tr_ptrArrayAppend( peer_pool, b->peer );
196    }
197
198#ifdef DEBUG_DIRECTION
199if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
200fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
201#endif
202
203    /* traverse & repeat for the subtree */
204    if( 1 ) {
205        int i;
206        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
207        const int n = tr_ptrArraySize( &b->children );
208        for( i=0; i<n; ++i )
209            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
210    }
211}
212
/**
 * First allocation phase for one priority band: round-robin through the
 * peers in "peerArray", letting each flush up to a small fixed chunk of
 * data per turn, until no peer can write any more. The active peers are
 * kept in peers[0..n); a peer that can't use its full chunk is swapped
 * to the tail and n shrinks, so finished peers drop out of the rotation.
 */
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int i, n;
    int peerCount = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    n = peerCount;
    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
    /* randomizing where the rotation starts keeps the same peers from
     * always being served first when bandwidth runs out mid-pass */
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    while( n > 0 )
    {
        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
         * frame right away and leave enough buffered data for the next frame to go
         * out in a timely manner. */
        const size_t increment = 3000;

        /* NOTE: this local shadows the file-scope bytesUsed() function */
        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );

        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );

        if( bytesUsed == (int)increment )
            ++i;
        else {
            /* peer is done writing for now; move it to the end of the list.
             * the swapped-in peer is visited next, so no one is skipped. */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        /* wrap the cursor back to the front of the active region */
        if( i >= n )
            i = 0;
    }
}
252
/**
 * Top-level per-timeslice allocator: fund the bandwidth budgets of "b"
 * and its whole subtree for the next "period_msec" milliseconds, then
 * drive peer I/O in priority order.
 * @param b           root of the bandwidth hierarchy to process
 * @param dir         TR_UP or TR_DOWN
 * @param period_msec length of this timeslice, in milliseconds
 */
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        /* hold a reference so the io can't be destroyed while the
         * priority arrays below still point at it */
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        /* the fall-throughs are intentional: a TR_PRI_HIGH io lands in
         * all three arrays and a TR_PRI_NORMAL io in two, so higher-
         * priority peers get a turn in each phaseOne() pass below */
        switch( io->priority ) {
            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
            default:            tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    /* drop the references taken above */
    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
310
/**
 * Attach (or, with peer==NULL, detach) the peerIo that consumes this
 * bandwidth object's allocation. This appears to be a non-owning
 * back-pointer: tr_bandwidthDestruct() never frees it -- verify
 * ownership against the peer-io code.
 */
void
tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer )
{
    assert( tr_isBandwidth( b ) );
    assert( ( peer == NULL ) || tr_isPeerIo( peer ) );

    b->peer = peer;
}
319
320/***
321****
322***/
323
324static unsigned int
325bandwidthClamp( const tr_bandwidth  * b,
326                uint64_t              now,
327                tr_direction          dir,
328                unsigned int          byteCount )
329{
330    assert( tr_isBandwidth( b ) );
331    assert( tr_isDirection( dir ) );
332
333    if( b )
334    {
335        if( b->band[dir].isLimited )
336        {
337            byteCount = MIN( byteCount, b->band[dir].bytesLeft );
338
339            /* if we're getting close to exceeding the speed limit,
340             * clamp down harder on the bytes available */
341            if( byteCount > 0 )
342            {
343                double current;
344                double desired;
345                double r;
346
347                if( now == 0 )
348                    now = tr_time_msec( );
349
350                current = tr_bandwidthGetRawSpeed_Bps( b, now, TR_DOWN );
351                desired = tr_bandwidthGetDesiredSpeed_Bps( b, TR_DOWN );
352                r = desired >= 1 ? current / desired : 0;
353
354                     if( r > 1.0 ) byteCount = 0;
355                else if( r > 0.9 ) byteCount *= 0.8;
356                else if( r > 0.8 ) byteCount *= 0.9;
357            }
358        }
359
360        if( b->parent && b->band[dir].honorParentLimits && ( byteCount > 0 ) )
361            byteCount = bandwidthClamp( b->parent, now, dir, byteCount );
362    }
363
364    return byteCount;
365}
/**
 * Public wrapper for bandwidthClamp(). Passing 0 as "now" tells the
 * helper to look up the current time itself -- and only if the
 * near-limit check actually needs it.
 */
unsigned int
tr_bandwidthClamp( const tr_bandwidth  * b,
                   tr_direction          dir,
                   unsigned int          byteCount )
{
    return bandwidthClamp( b, 0, dir, byteCount );
}
373
374
375unsigned int
376tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
377{
378    assert( tr_isBandwidth( b ) );
379    assert( tr_isDirection( dir ) );
380
381    return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now );
382}
383
384unsigned int
385tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
386{
387    assert( tr_isBandwidth( b ) );
388    assert( tr_isDirection( dir ) );
389
390    return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now );
391}
392
393void
394tr_bandwidthUsed( tr_bandwidth  * b,
395                  tr_direction    dir,
396                  size_t          byteCount,
397                  bool         isPieceData,
398                  uint64_t        now )
399{
400    struct tr_band * band;
401
402    assert( tr_isBandwidth( b ) );
403    assert( tr_isDirection( dir ) );
404
405    band = &b->band[dir];
406
407    if( band->isLimited && isPieceData )
408        band->bytesLeft -= MIN( band->bytesLeft, byteCount );
409
410#ifdef DEBUG_DIRECTION
411if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) )
412fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n",
413         b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft );
414#endif
415
416    bytesUsed( now, &band->raw, byteCount );
417
418    if( isPieceData )
419        bytesUsed( now, &band->piece, byteCount );
420
421    if( b->parent != NULL )
422        tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now );
423}
Note: See TracBrowser for help on using the repository browser.