source: trunk/libtransmission/bandwidth.c @ 12427

Last change on this file since 12427 was 12427, checked in by jordan, 11 years ago

(trunk libT) make allocateBandwidth()'s "priority" field const.

  • Property svn:keywords set to Date Rev Author Id
File size: 11.8 KB
Line 
1/*
2 * This file Copyright (C) Mnemosyne LLC
3 *
4 * This file is licensed by the GPL version 2. Works owned by the
5 * Transmission project are granted a special exemption to clause 2(b)
6 * so that the bulk of its code can remain under the MIT license.
7 * This exemption does not extend to derived works not owned by
8 * the Transmission project.
9 *
10 * $Id: bandwidth.c 12427 2011-05-11 21:09:31Z jordan $
11 */
12
#include <assert.h>
#include <limits.h>
#include <stdint.h> /* uintptr_t */
#include <string.h> /* memset() */
16
17#include "transmission.h"
18#include "bandwidth.h"
19#include "crypto.h" /* tr_cryptoWeakRandInt() */
20#include "peer-io.h"
21#include "utils.h"
22
/* log via tr_deepLog() only when deep logging is enabled; the
 * tr_deepLoggingIsActive() test keeps the tr_deepLog() call (and the
 * evaluation of its arguments) out of the common logging-off path */
#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
    } while( 0 )
28
29/***
30****
31***/
32
static unsigned int
getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now )
{
    /* Return the average transfer speed, in bytes per second, over the
     * trailing interval_msec milliseconds of r's history.
     * "now" is the current time in msec; pass 0 to have it looked up here.
     * The result is memoized in r->cache_val / r->cache_time so that
     * repeated calls at the same timestamp don't rescan the history. */
    if( !now )
        now = tr_time_msec();

    if( now != r->cache_time )
    {
        int i = r->newest;
        uint64_t bytes = 0;
        const uint64_t cutoff = now - interval_msec;
        /* r is logically const to callers; const is cast away only to
         * refresh the embedded cache fields.  NOTE(review): despite the
         * name, this pointer is not volatile-qualified. */
        struct bratecontrol * rvolatile = (struct bratecontrol*) r;

        for( ;; )
        {
            if( r->transfers[i].date <= cutoff )
                break; /* the rest of the history is too old to count */

            bytes += r->transfers[i].size;

            if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */
            if( i == r->newest ) break; /* we've come all the way around */
        }

        /* bytes per msec, scaled up to bytes per second */
        rvolatile->cache_val = (unsigned int)(( bytes * 1000u ) / interval_msec);
        rvolatile->cache_time = now;
    }

    return r->cache_val;
}
63
64static void
65bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size )
66{
67    if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now )
68        r->transfers[r->newest].size += size;
69    else
70    {
71        if( ++r->newest == HISTORY_SIZE ) r->newest = 0;
72        r->transfers[r->newest].date = now;
73        r->transfers[r->newest].size = size;
74    }
75
76    /* invalidate cache_val*/
77    r->cache_time = 0;
78}
79
80/******
81*******
82*******
83******/
84
/* Three-way comparison of two pointers in strcmp() style; used as the
 * ordering function for the sorted "children" tr_ptrArray.
 *
 * The addresses are compared as uintptr_t values: relational comparison
 * of pointers into different objects is undefined behavior in C, whereas
 * comparing the converted integers is well-defined. */
static inline int
comparePointers( const void * a, const void * b )
{
    const uintptr_t ua = (uintptr_t) a;
    const uintptr_t ub = (uintptr_t) b;

    if( ua < ub ) return -1;
    if( ua > ub ) return 1;
    return 0;
}
93
94/***
95****
96***/
97
98void
99tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent )
100{
101    b->session = session;
102    b->children = TR_PTR_ARRAY_INIT;
103    b->magicNumber = BANDWIDTH_MAGIC_NUMBER;
104    b->band[TR_UP].honorParentLimits = true;
105    b->band[TR_DOWN].honorParentLimits = true;
106    tr_bandwidthSetParent( b, parent );
107}
108
void
tr_bandwidthDestruct( tr_bandwidth * b )
{
    assert( tr_isBandwidth( b ) );

    /* detach from the hierarchy before tearing down */
    tr_bandwidthSetParent( b, NULL );
    tr_ptrArrayDestruct( &b->children, NULL );

    /* poison the memory so use-after-destruct is easier to spot; this also
     * clobbers magicNumber, so tr_isBandwidth() will fail afterwards */
    memset( b, ~0, sizeof( tr_bandwidth ) );
}
119
120/***
121****
122***/
123
void
tr_bandwidthSetParent( tr_bandwidth  * b,
                       tr_bandwidth  * parent )
{
    /* Re-parent b in the bandwidth hierarchy: detach it from its current
     * parent (if any), then attach it under "parent" (may be NULL to leave
     * it detached).  Each parent keeps its children in a tr_ptrArray kept
     * sorted by pointer value (see comparePointers()). */
    assert( tr_isBandwidth( b ) );
    assert( b != parent );

    if( b->parent )
    {
        void * removed;

        assert( tr_isBandwidth( b->parent ) );

        removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, comparePointers );
        assert( removed == b );
        assert( tr_ptrArrayFindSorted( &b->parent->children, b, comparePointers ) == NULL );

        b->parent = NULL;
    }

    if( parent )
    {
        assert( tr_isBandwidth( parent ) );
        assert( parent->parent != b ); /* guard against creating a two-node cycle */

        tr_ptrArrayInsertSorted( &parent->children, b, comparePointers );
        assert( tr_ptrArrayFindSorted( &parent->children, b, comparePointers ) == b );
        b->parent = parent;
    }
}
154
155/***
156****
157***/
158#if 0
159#warning do not check the code in with this enabled
160#define DEBUG_DIRECTION TR_UP
161#endif
162
163static void
164allocateBandwidth( tr_bandwidth  * b,
165                   tr_priority_t   parent_priority,
166                   tr_direction    dir,
167                   unsigned int    period_msec,
168                   tr_ptrArray   * peer_pool )
169{
170    const tr_priority_t priority = MAX( parent_priority, b->priority );
171
172    assert( tr_isBandwidth( b ) );
173    assert( tr_isDirection( dir ) );
174
175    /* set the available bandwidth */
176    if( b->band[dir].isLimited )
177    {
178        const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
179        b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u;
180
181#ifdef DEBUG_DIRECTION
182        if( dir == DEBUG_DIRECTION )
183                fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %d\n",
184                         b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
185                         b->band[dir].bytesLeft );
186#endif
187    }
188
189    /* add this bandwidth's peer, if any, to the peer pool */
190    if( b->peer != NULL ) {
191        b->peer->priority = priority;
192        tr_ptrArrayAppend( peer_pool, b->peer );
193    }
194
195#ifdef DEBUG_DIRECTION
196if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
197fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
198#endif
199
200    /* traverse & repeat for the subtree */
201    if( 1 ) {
202        int i;
203        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
204        const int n = tr_ptrArraySize( &b->children );
205        for( i=0; i<n; ++i )
206            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
207    }
208}
209
210static void
211phaseOne( tr_ptrArray * peerArray, tr_direction dir )
212{
213    int i, n;
214    int peerCount = tr_ptrArraySize( peerArray );
215    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );
216
217    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
218     * peers from starving the others. Loop through the peers, giving each a
219     * small chunk of bandwidth. Keep looping until we run out of bandwidth
220     * and/or peers that can use it */
221    n = peerCount;
222    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
223    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
224    while( n > 0 )
225    {
226        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
227         * frame right away and leave enough buffered data for the next frame to go
228         * out in a timely manner. */
229        const size_t increment = 3000;
230
231        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );
232
233        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );
234
235        if( bytesUsed == (int)increment )
236            ++i;
237        else {
238            /* peer is done writing for now; move it to the end of the list */
239            tr_peerIo * pio = peers[i];
240            peers[i] = peers[n-1];
241            peers[n-1] = pio;
242            --n;
243        }
244
245        if( i >= n )
246            i = 0;
247    }
248}
249
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    /* Top-level entry point, called once per pulse: hand out period_msec
     * worth of bandwidth across b's whole subtree, then flush the peers. */
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io ); /* hold a ref so the io can't vanish mid-allocation */

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        /* deliberate fall-through: a HIGH-priority peer lands in all three
         * buckets and a NORMAL one in two, so higher-priority peers get
         * proportionally more turns in the phaseOne() rounds below */
        switch( io->priority ) {
            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
            default:            tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
307
void
tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer )
{
    /* Attach "peer" as the peerIo governed by this bandwidth node,
     * or detach the current one when peer is NULL. */
    assert( tr_isBandwidth( b ) );
    assert( ( peer == NULL ) || tr_isPeerIo( peer ) );

    b->peer = peer;
}
316
317/***
318****
319***/
320
static unsigned int
bandwidthClamp( const tr_bandwidth  * b,
                uint64_t              now,
                tr_direction          dir,
                unsigned int          byteCount )
{
    /* Clamp byteCount down to what b -- and, recursively, its ancestors --
     * will allow to be transferred in direction "dir" right now.
     * "now" is the current time in msec, or 0 to look it up lazily. */
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    if( b )
    {
        if( b->band[dir].isLimited )
        {
            /* never hand out more than this pulse's remaining allowance */
            byteCount = MIN( byteCount, b->band[dir].bytesLeft );

            /* if we're getting close to exceeding the speed limit,
             * clamp down harder on the bytes available */
            if( byteCount > 0 )
            {
                double current;
                double desired;
                double r;

                if( now == 0 )
                    now = tr_time_msec( );

                /* NOTE(review): these use TR_DOWN regardless of "dir";
                 * possibly intentional (throttling on download pressure)
                 * but worth confirming against upstream */
                current = tr_bandwidthGetRawSpeed_Bps( b, now, TR_DOWN );
                desired = tr_bandwidthGetDesiredSpeed_Bps( b, TR_DOWN );
                r = desired >= 1 ? current / desired : 0;

                     if( r > 1.0 ) byteCount = 0;
                else if( r > 0.9 ) byteCount *= 0.8;
                else if( r > 0.8 ) byteCount *= 0.9;
            }
        }

        /* recurse up the hierarchy unless this node opts out of parent limits */
        if( b->parent && b->band[dir].honorParentLimits && ( byteCount > 0 ) )
            byteCount = bandwidthClamp( b->parent, now, dir, byteCount );
    }

    return byteCount;
}
unsigned int
tr_bandwidthClamp( const tr_bandwidth  * b,
                   tr_direction          dir,
                   unsigned int          byteCount )
{
    /* public wrapper: clamp byteCount against b's (and its ancestors')
     * remaining allowance; now==0 lets bandwidthClamp() look up the
     * current time itself only if it needs it */
    return bandwidthClamp( b, 0, dir, byteCount );
}
370
371
unsigned int
tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
{
    /* total transfer speed over the HISTORY_MSEC window, in bytes/sec,
     * counted from the "raw" history (see tr_bandwidthUsed()) */
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now );
}
380
unsigned int
tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
{
    /* piece-data-only transfer speed over the HISTORY_MSEC window, in
     * bytes/sec, counted from the "piece" history (see tr_bandwidthUsed()) */
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now );
}
389
390void
391tr_bandwidthUsed( tr_bandwidth  * b,
392                  tr_direction    dir,
393                  size_t          byteCount,
394                  bool         isPieceData,
395                  uint64_t        now )
396{
397    struct tr_band * band;
398
399    assert( tr_isBandwidth( b ) );
400    assert( tr_isDirection( dir ) );
401
402    band = &b->band[dir];
403
404    if( band->isLimited && isPieceData )
405        band->bytesLeft -= MIN( band->bytesLeft, byteCount );
406
407#ifdef DEBUG_DIRECTION
408if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) )
409fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n",
410         b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft );
411#endif
412
413    bytesUsed( now, &band->raw, byteCount );
414
415    if( isPieceData )
416        bytesUsed( now, &band->piece, byteCount );
417
418    if( b->parent != NULL )
419        tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now );
420}
Note: See TracBrowser for help on using the repository browser.