source: trunk/libtransmission/bandwidth.c @ 12415

Last change on this file since 12415 was 12415, checked in by jordan, 11 years ago

(trunk libT) #2338 "Add uTP support" -- increase the block bandwidth to 3000 bytes in phase 1 as suggested by jch in comment:120.

The rationale is that by using 3000 bytes we'll send one full-size frame straight away, and leave enough buffered data for the next frame to go out in a timely manner.

  • Property svn:keywords set to Date Rev Author Id
File size: 11.7 KB
Line 
1/*
2 * This file Copyright (C) Mnemosyne LLC
3 *
4 * This file is licensed by the GPL version 2. Works owned by the
5 * Transmission project are granted a special exemption to clause 2(b)
6 * so that the bulk of its code can remain under the MIT license.
7 * This exemption does not extend to derived works not owned by
8 * the Transmission project.
9 *
10 * $Id: bandwidth.c 12415 2011-05-04 21:38:01Z jordan $
11 */
12
#include <assert.h>
#include <limits.h>
#include <stdint.h> /* uintptr_t */
#include <string.h> /* memset() */

#include "transmission.h"
#include "bandwidth.h"
#include "crypto.h" /* tr_cryptoWeakRandInt() */
#include "peer-io.h"
#include "utils.h"
22
/* Deep-debug logging helper: forwards to tr_deepLog() with this file
 * and line, but only when deep logging is enabled at runtime. */
#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
    } while( 0 )
28
29/***
30****
31***/
32
33static unsigned int
34getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now )
35{
36    if( !now )
37        now = tr_time_msec();
38
39    if( now != r->cache_time )
40    {
41        int i = r->newest;
42        uint64_t bytes = 0;
43        const uint64_t cutoff = now - interval_msec;
44        struct bratecontrol * rvolatile = (struct bratecontrol*) r;
45
46        for( ;; )
47        {
48            if( r->transfers[i].date <= cutoff )
49                break;
50
51            bytes += r->transfers[i].size;
52
53            if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */
54            if( i == r->newest ) break; /* we've come all the way around */
55        }
56
57        rvolatile->cache_val = (unsigned int)(( bytes * 1000u ) / interval_msec);
58        rvolatile->cache_time = now;
59    }
60
61    return r->cache_val;
62}
63
64static void
65bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size )
66{
67    if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now )
68        r->transfers[r->newest].size += size;
69    else
70    {
71        if( ++r->newest == HISTORY_SIZE ) r->newest = 0;
72        r->transfers[r->newest].date = now;
73        r->transfers[r->newest].size = size;
74    }
75
76    /* invalidate cache_val*/
77    r->cache_time = 0;
78}
79
80/******
81*******
82*******
83******/
84
/* qsort()-style comparator that orders two pointers by address.
 * Relational comparison ('<') of pointers into different objects is
 * undefined behavior in C (C11 6.5.8p5), so compare their integer
 * representations via uintptr_t instead — well-defined, and yields
 * the same total order on flat-address-space platforms.
 * Returns -1, 0, or 1. */
static inline int
comparePointers( const void * a, const void * b )
{
    const uintptr_t ua = (uintptr_t) a;
    const uintptr_t ub = (uintptr_t) b;

    if( ua < ub ) return -1;
    if( ua > ub ) return 1;
    return 0;
}
93
94/***
95****
96***/
97
98void
99tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent )
100{
101    b->session = session;
102    b->children = TR_PTR_ARRAY_INIT;
103    b->magicNumber = BANDWIDTH_MAGIC_NUMBER;
104    b->band[TR_UP].honorParentLimits = true;
105    b->band[TR_DOWN].honorParentLimits = true;
106    tr_bandwidthSetParent( b, parent );
107}
108
109void
110tr_bandwidthDestruct( tr_bandwidth * b )
111{
112    assert( tr_isBandwidth( b ) );
113
114    tr_bandwidthSetParent( b, NULL );
115    tr_ptrArrayDestruct( &b->children, NULL );
116
117    memset( b, ~0, sizeof( tr_bandwidth ) );
118}
119
120/***
121****
122***/
123
124void
125tr_bandwidthSetParent( tr_bandwidth  * b,
126                       tr_bandwidth  * parent )
127{
128    assert( tr_isBandwidth( b ) );
129    assert( b != parent );
130
131    if( b->parent )
132    {
133        void * removed;
134
135        assert( tr_isBandwidth( b->parent ) );
136
137        removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, comparePointers );
138        assert( removed == b );
139        assert( tr_ptrArrayFindSorted( &b->parent->children, b, comparePointers ) == NULL );
140
141        b->parent = NULL;
142    }
143
144    if( parent )
145    {
146        assert( tr_isBandwidth( parent ) );
147        assert( parent->parent != b );
148
149        tr_ptrArrayInsertSorted( &parent->children, b, comparePointers );
150        assert( tr_ptrArrayFindSorted( &parent->children, b, comparePointers ) == b );
151        b->parent = parent;
152    }
153}
154
155/***
156****
157***/
158#if 0
159#warning do not check the code in with this enabled
160#define DEBUG_DIRECTION TR_UP
161#endif
162
163static void
164allocateBandwidth( tr_bandwidth  * b,
165                   tr_priority_t   parent_priority,
166                   tr_direction    dir,
167                   unsigned int    period_msec,
168                   tr_ptrArray   * peer_pool )
169{
170    tr_priority_t priority;
171
172    assert( tr_isBandwidth( b ) );
173    assert( tr_isDirection( dir ) );
174
175    /* set the available bandwidth */
176    if( b->band[dir].isLimited )
177    {
178        const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
179        b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u;
180
181#ifdef DEBUG_DIRECTION
182        if( dir == DEBUG_DIRECTION )
183                fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %d\n",
184                         b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
185                         b->band[dir].bytesLeft );
186#endif
187    }
188
189    priority = MAX( parent_priority, b->priority );
190
191    /* add this bandwidth's peer, if any, to the peer pool */
192    if( b->peer != NULL ) {
193        b->peer->priority = priority;
194        tr_ptrArrayAppend( peer_pool, b->peer );
195    }
196
197#ifdef DEBUG_DIRECTION
198if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
199fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
200#endif
201
202    /* traverse & repeat for the subtree */
203    if( 1 ) {
204        int i;
205        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
206        const int n = tr_ptrArraySize( &b->children );
207        for( i=0; i<n; ++i )
208            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
209    }
210}
211
212static void
213phaseOne( tr_ptrArray * peerArray, tr_direction dir )
214{
215    int i, n;
216    int peerCount = tr_ptrArraySize( peerArray );
217    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );
218
219    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
220     * peers from starving the others. Loop through the peers, giving each a
221     * small chunk of bandwidth. Keep looping until we run out of bandwidth
222     * and/or peers that can use it */
223    n = peerCount;
224    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
225    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
226    while( n > 0 )
227    {
228        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
229         * frame right away and leave enough buffered data for the next frame to go
230         * out in a timely manner. */
231        const size_t increment = 3000;
232
233        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );
234
235        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );
236
237        if( bytesUsed == (int)increment )
238            ++i;
239        else {
240            /* peer is done writing for now; move it to the end of the list */
241            tr_peerIo * pio = peers[i];
242            peers[i] = peers[n-1];
243            peers[n-1] = pio;
244            --n;
245        }
246
247        if( i >= n )
248            i = 0;
249    }
250}
251
252void
253tr_bandwidthAllocate( tr_bandwidth  * b,
254                      tr_direction    dir,
255                      unsigned int    period_msec )
256{
257    int i, peerCount;
258    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
259    tr_ptrArray low = TR_PTR_ARRAY_INIT;
260    tr_ptrArray high = TR_PTR_ARRAY_INIT;
261    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
262    struct tr_peerIo ** peers;
263
264    /* allocateBandwidth() is a helper function with two purposes:
265     * 1. allocate bandwidth to b and its subtree
266     * 2. accumulate an array of all the peerIos from b and its subtree. */
267    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
268    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
269    peerCount = tr_ptrArraySize( &tmp );
270
271    for( i=0; i<peerCount; ++i )
272    {
273        tr_peerIo * io = peers[i];
274        tr_peerIoRef( io );
275
276        tr_peerIoFlushOutgoingProtocolMsgs( io );
277
278        switch( io->priority ) {
279            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
280            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
281            default:            tr_ptrArrayAppend( &low,    io );
282        }
283    }
284
285    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
286     * peers from starving the others. Loop through the peers, giving each a
287     * small chunk of bandwidth. Keep looping until we run out of bandwidth
288     * and/or peers that can use it */
289    phaseOne( &high, dir );
290    phaseOne( &normal, dir );
291    phaseOne( &low, dir );
292
293    /* Second phase of IO. To help us scale in high bandwidth situations,
294     * enable on-demand IO for peers with bandwidth left to burn.
295     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
296     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
297    for( i=0; i<peerCount; ++i )
298        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );
299
300    for( i=0; i<peerCount; ++i )
301        tr_peerIoUnref( peers[i] );
302
303    /* cleanup */
304    tr_ptrArrayDestruct( &normal, NULL );
305    tr_ptrArrayDestruct( &high, NULL );
306    tr_ptrArrayDestruct( &low, NULL );
307    tr_ptrArrayDestruct( &tmp, NULL );
308}
309
310void
311tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer )
312{
313    assert( tr_isBandwidth( b ) );
314    assert( ( peer == NULL ) || tr_isPeerIo( peer ) );
315
316    b->peer = peer;
317}
318
319/***
320****
321***/
322
323static unsigned int
324bandwidthClamp( const tr_bandwidth  * b,
325                const uint64_t        now,
326                tr_direction          dir,
327                unsigned int          byteCount )
328{
329    assert( tr_isBandwidth( b ) );
330    assert( tr_isDirection( dir ) );
331
332    if( b )
333    {
334        if( b->band[dir].isLimited )
335        {
336            byteCount = MIN( byteCount, b->band[dir].bytesLeft );
337
338            /* if we're getting close to exceeding the speed limit,
339             * clamp down harder on the bytes available */
340            if( byteCount > 0 )
341            {
342                double current = tr_bandwidthGetRawSpeed_Bps( b, now, TR_DOWN );
343                double desired = tr_bandwidthGetDesiredSpeed_Bps( b, TR_DOWN );
344                double r = desired >= 1 ? current / desired : 0;
345
346                     if( r > 1.0 ) byteCount = 0;
347                else if( r > 0.9 ) byteCount *= 0.8;
348                else if( r > 0.8 ) byteCount *= 0.9;
349            }
350        }
351
352        if( b->parent && b->band[dir].honorParentLimits && ( byteCount > 0 ) )
353            byteCount = bandwidthClamp( b->parent, now, dir, byteCount );
354    }
355
356    return byteCount;
357}
358unsigned int
359tr_bandwidthClamp( const tr_bandwidth  * b,
360                   tr_direction          dir,
361                   unsigned int          byteCount )
362{
363    const uint64_t now_msec = tr_time_msec( );
364    return bandwidthClamp( b, now_msec, dir, byteCount );
365}
366
367
368unsigned int
369tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
370{
371    assert( tr_isBandwidth( b ) );
372    assert( tr_isDirection( dir ) );
373
374    return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now );
375}
376
377unsigned int
378tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
379{
380    assert( tr_isBandwidth( b ) );
381    assert( tr_isDirection( dir ) );
382
383    return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now );
384}
385
386void
387tr_bandwidthUsed( tr_bandwidth  * b,
388                  tr_direction    dir,
389                  size_t          byteCount,
390                  bool         isPieceData,
391                  uint64_t        now )
392{
393    struct tr_band * band;
394
395    assert( tr_isBandwidth( b ) );
396    assert( tr_isDirection( dir ) );
397
398    band = &b->band[dir];
399
400    if( band->isLimited && isPieceData )
401        band->bytesLeft -= MIN( band->bytesLeft, byteCount );
402
403#ifdef DEBUG_DIRECTION
404if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) )
405fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n",
406         b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft );
407#endif
408
409    bytesUsed( now, &band->raw, byteCount );
410
411    if( isPieceData )
412        bytesUsed( now, &band->piece, byteCount );
413
414    if( b->parent != NULL )
415        tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now );
416}
Note: See TracBrowser for help on using the repository browser.