source: trunk/libtransmission/bandwidth.c @ 12361

Last change on this file since 12361 was 12361, checked in by jordan, 11 years ago

(trunk libT) changes to the bandwidth allocator's phaseOne step as suggested by Vincent in #2338 comment:108

  • Property svn:keywords set to Date Rev Author Id
File size: 11.5 KB
Line 
1/*
2 * This file Copyright (C) Mnemosyne LLC
3 *
4 * This file is licensed by the GPL version 2. Works owned by the
5 * Transmission project are granted a special exemption to clause 2(b)
6 * so that the bulk of its code can remain under the MIT license.
7 * This exemption does not extend to derived works not owned by
8 * the Transmission project.
9 *
10 * $Id: bandwidth.c 12361 2011-04-16 21:46:32Z jordan $
11 */
12
13#include <assert.h>
14#include <limits.h>
15#include <string.h> /* memset() */
16
17#include "transmission.h"
18#include "bandwidth.h"
19#include "crypto.h" /* tr_cryptoWeakRandInt() */
20#include "peer-io.h"
21#include "utils.h"
22
/* deep-log helper: forwards to tr_deepLog() only when deep logging is
 * active; wrapped in do/while(0) so it behaves as a single statement */
#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
    } while( 0 )
28
29/***
30****
31***/
32
33static unsigned int
34getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now )
35{
36    if( !now )
37        now = tr_time_msec();
38
39    if( now != r->cache_time )
40    {
41        int i = r->newest;
42        uint64_t bytes = 0;
43        const uint64_t cutoff = now - interval_msec;
44        struct bratecontrol * rvolatile = (struct bratecontrol*) r;
45
46        for( ;; )
47        {
48            if( r->transfers[i].date <= cutoff )
49                break;
50
51            bytes += r->transfers[i].size;
52
53            if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */
54            if( i == r->newest ) break; /* we've come all the way around */
55        }
56
57        rvolatile->cache_val = (unsigned int)(( bytes * 1000u ) / interval_msec);
58        rvolatile->cache_time = now;
59    }
60
61    return r->cache_val;
62}
63
64static void
65bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size )
66{
67    if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now )
68        r->transfers[r->newest].size += size;
69    else
70    {
71        if( ++r->newest == HISTORY_SIZE ) r->newest = 0;
72        r->transfers[r->newest].date = now;
73        r->transfers[r->newest].size = size;
74    }
75
76    /* invalidate cache_val*/
77    r->cache_time = 0;
78}
79
80/******
81*******
82*******
83******/
84
/* three-way comparison of raw pointer values, for keeping the
 * children ptrArrays sorted: -1, 0, or 1 */
static inline int
comparePointers( const void * a, const void * b )
{
    if( a == b )
        return 0;

    return ( a < b ) ? -1 : 1;
}
93
94/***
95****
96***/
97
98void
99tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent )
100{
101    b->session = session;
102    b->children = TR_PTR_ARRAY_INIT;
103    b->magicNumber = BANDWIDTH_MAGIC_NUMBER;
104    b->band[TR_UP].honorParentLimits = true;
105    b->band[TR_DOWN].honorParentLimits = true;
106    tr_bandwidthSetParent( b, parent );
107}
108
/* Tear down `b`: detach it from its parent, release the children array,
 * then poison the struct with ~0 bytes. The poisoning also clobbers
 * magicNumber, so any later use of `b` trips the tr_isBandwidth()
 * assertions instead of silently reading freed state. The statement
 * order matters: detach must precede the array destruct and the memset
 * must come last. */
void
tr_bandwidthDestruct( tr_bandwidth * b )
{
    assert( tr_isBandwidth( b ) );

    tr_bandwidthSetParent( b, NULL );
    tr_ptrArrayDestruct( &b->children, NULL );

    memset( b, ~0, sizeof( tr_bandwidth ) );
}
119
120/***
121****
122***/
123
124void
125tr_bandwidthSetParent( tr_bandwidth  * b,
126                       tr_bandwidth  * parent )
127{
128    assert( tr_isBandwidth( b ) );
129    assert( b != parent );
130
131    if( b->parent )
132    {
133        void * removed;
134
135        assert( tr_isBandwidth( b->parent ) );
136
137        removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, comparePointers );
138        assert( removed == b );
139        assert( tr_ptrArrayFindSorted( &b->parent->children, b, comparePointers ) == NULL );
140
141        b->parent = NULL;
142    }
143
144    if( parent )
145    {
146        assert( tr_isBandwidth( parent ) );
147        assert( parent->parent != b );
148
149        tr_ptrArrayInsertSorted( &parent->children, b, comparePointers );
150        assert( tr_ptrArrayFindSorted( &parent->children, b, comparePointers ) == b );
151        b->parent = parent;
152    }
153}
154
155/***
156****
157***/
158#if 0
159#warning do not check the code in with this enabled
160#define DEBUG_DIRECTION TR_UP
161#endif
162
163static void
164allocateBandwidth( tr_bandwidth  * b,
165                   tr_priority_t   parent_priority,
166                   tr_direction    dir,
167                   unsigned int    period_msec,
168                   tr_ptrArray   * peer_pool )
169{
170    tr_priority_t priority;
171
172    assert( tr_isBandwidth( b ) );
173    assert( tr_isDirection( dir ) );
174
175    /* set the available bandwidth */
176    if( b->band[dir].isLimited )
177    {
178        const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
179        b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u;
180
181#ifdef DEBUG_DIRECTION
182        if( dir == DEBUG_DIRECTION )
183                fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %d\n",
184                         b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
185                         b->band[dir].bytesLeft );
186#endif
187    }
188
189    priority = MAX( parent_priority, b->priority );
190
191    /* add this bandwidth's peer, if any, to the peer pool */
192    if( b->peer != NULL ) {
193        b->peer->priority = priority;
194        tr_ptrArrayAppend( peer_pool, b->peer );
195    }
196
197#ifdef DEBUG_DIRECTION
198if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
199fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
200#endif
201
202    /* traverse & repeat for the subtree */
203    if( 1 ) {
204        int i;
205        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
206        const int n = tr_ptrArraySize( &b->children );
207        for( i=0; i<n; ++i )
208            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
209    }
210}
211
212static void
213phaseOne( tr_ptrArray * peerArray, tr_direction dir )
214{
215    int i, n;
216    int peerCount = tr_ptrArraySize( peerArray );
217    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );
218
219    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
220     * peers from starving the others. Loop through the peers, giving each a
221     * small chunk of bandwidth. Keep looping until we run out of bandwidth
222     * and/or peers that can use it */
223    n = peerCount;
224    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
225    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
226    while( n > 0 )
227    {
228        const size_t increment = 1024;
229        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );
230
231        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );
232
233        if( bytesUsed == (int)increment )
234            ++i;
235        else {
236            /* peer is done writing for now; move it to the end of the list */
237            tr_peerIo * pio = peers[i];
238            peers[i] = peers[n-1];
239            peers[n-1] = pio;
240            --n;
241        }
242
243        if( i >= n )
244            i = 0;
245    }
246}
247
/* Top-level per-pulse allocator: refill the byte budgets for b's whole
 * subtree, then drive IO on every collected peer in two phases. */
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        /* hold a reference so the io can't be destroyed mid-allocation;
         * released in the tr_peerIoUnref() loop below */
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        /* NOTE: the fallthroughs are deliberate -- a HIGH-priority io is
         * appended to all three buckets and a NORMAL one to normal+low,
         * so higher-priority peers are served in more phaseOne() passes */
        switch( io->priority ) {
            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
            default:            tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    /* release the references taken at the top of the first loop */
    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup -- the arrays only borrowed the io pointers, so no free func */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
305
306void
307tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer )
308{
309    assert( tr_isBandwidth( b ) );
310    assert( ( peer == NULL ) || tr_isPeerIo( peer ) );
311
312    b->peer = peer;
313}
314
315/***
316****
317***/
318
/* Clamp `byteCount` down to what `b` -- and, recursively, its ancestors
 * when honorParentLimits is set -- will allow in direction `dir` at
 * time `now` (msec). Returns the clamped byte count. */
static unsigned int
bandwidthClamp( const tr_bandwidth  * b,
                const uint64_t        now,
                tr_direction          dir,
                unsigned int          byteCount )
{
    /* NOTE(review): these asserts run before the `if( b )` test below;
     * presumably tr_isBandwidth() rejects NULL, which would make that
     * test redundant in debug builds -- confirm its NULL handling */
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    if( b )
    {
        if( b->band[dir].isLimited )
        {
            byteCount = MIN( byteCount, b->band[dir].bytesLeft );

            /* if we're getting close to exceeding the speed limit,
             * clamp down harder on the bytes available */
            if( byteCount > 0 )
            {
                /* NOTE(review): both speeds are sampled with TR_DOWN
                 * regardless of `dir`, so upload clamping is driven by
                 * download speed -- confirm whether `dir` was intended */
                double current = tr_bandwidthGetRawSpeed_Bps( b, now, TR_DOWN );
                double desired = tr_bandwidthGetDesiredSpeed_Bps( b, TR_DOWN );
                double r = desired >= 1 ? current / desired : 0;

                     if( r > 1.0 ) byteCount = 0;     /* already over the limit: stop entirely */
                else if( r > 0.9 ) byteCount *= 0.8;  /* within 10% of the limit: brake hard */
                else if( r > 0.8 ) byteCount *= 0.9;  /* within 20% of the limit: brake gently */
            }
        }

        /* ancestors can clamp further, but never raise the count */
        if( b->parent && b->band[dir].honorParentLimits && ( byteCount > 0 ) )
            byteCount = bandwidthClamp( b->parent, now, dir, byteCount );
    }

    return byteCount;
}
354unsigned int
355tr_bandwidthClamp( const tr_bandwidth  * b,
356                   tr_direction          dir,
357                   unsigned int          byteCount )
358{
359    const uint64_t now_msec = tr_time_msec( );
360    return bandwidthClamp( b, now_msec, dir, byteCount );
361}
362
363
364unsigned int
365tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
366{
367    assert( tr_isBandwidth( b ) );
368    assert( tr_isDirection( dir ) );
369
370    return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now );
371}
372
373unsigned int
374tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
375{
376    assert( tr_isBandwidth( b ) );
377    assert( tr_isDirection( dir ) );
378
379    return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now );
380}
381
382void
383tr_bandwidthUsed( tr_bandwidth  * b,
384                  tr_direction    dir,
385                  size_t          byteCount,
386                  bool         isPieceData,
387                  uint64_t        now )
388{
389    struct tr_band * band;
390
391    assert( tr_isBandwidth( b ) );
392    assert( tr_isDirection( dir ) );
393
394    band = &b->band[dir];
395
396    if( band->isLimited && isPieceData )
397        band->bytesLeft -= MIN( band->bytesLeft, byteCount );
398
399#ifdef DEBUG_DIRECTION
400if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) )
401fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n",
402         b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft );
403#endif
404
405    bytesUsed( now, &band->raw, byteCount );
406
407    if( isPieceData )
408        bytesUsed( now, &band->piece, byteCount );
409
410    if( b->parent != NULL )
411        tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now );
412}
Note: See TracBrowser for help on using the repository browser.