@@ -133,15 +133,15 @@ type BurndownResult struct {
 	// The rest of the elements are equal the number of line removals by the corresponding
 	// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
 	PeopleMatrix DenseHistory
-	// The size of each tick.
-	TickSize time.Duration
 
 	// The following members are private.
 
 	// reversedPeopleDict is borrowed from IdentityDetector and becomes available after
 	// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
 	// facts[FactIdentityDetectorReversedPeopleDict].
 	reversedPeopleDict []string
+	// tickSize references TicksSinceStart.tickSize
+	tickSize time.Duration
 	// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
 	// such as merging several results together.
 	sampling int
@@ -571,7 +571,7 @@ func (analyser *BurndownAnalysis) Finalize() interface{} {
 		FileOwnership:      fileOwnership,
 		PeopleHistories:    peopleHistories,
 		PeopleMatrix:       peopleMatrix,
-		TickSize:           analyser.tickSize,
+		tickSize:           analyser.tickSize,
 		reversedPeopleDict: analyser.reversedPeopleDict,
 		sampling:           analyser.Sampling,
 		granularity:        analyser.Granularity,
@@ -613,7 +613,7 @@ func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, er
 		GlobalHistory: convertCSR(msg.Project),
 		FileHistories: map[string]DenseHistory{},
 		FileOwnership: map[string]map[int]int{},
-		TickSize:      time.Duration(msg.GetTickSize()),
+		tickSize:      time.Duration(msg.TickSize),
 
 		granularity: int(msg.Granularity),
 		sampling:    int(msg.Sampling),
@@ -649,17 +649,17 @@ func (analyser *BurndownAnalysis) MergeResults(
 	r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
 	bar1 := r1.(BurndownResult)
 	bar2 := r2.(BurndownResult)
-	if bar1.TickSize != bar2.TickSize {
+	if bar1.tickSize != bar2.tickSize {
 		return fmt.Errorf("mismatching tick sizes (r1: %d, r2: %d) received",
-			bar1.TickSize, bar2.TickSize)
+			bar1.tickSize, bar2.tickSize)
 	}
 	// for backwards-compatibility, if no tick size is present set to default
-	analyser.tickSize = bar1.TickSize
+	analyser.tickSize = bar1.tickSize
 	if analyser.tickSize == 0 {
 		analyser.tickSize = items.DefaultTicksSinceStartTickSize * time.Hour
 	}
 	merged := BurndownResult{
-		TickSize: analyser.tickSize,
+		tickSize: analyser.tickSize,
 	}
 	if bar1.sampling < bar2.sampling {
 		merged.sampling = bar1.sampling
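
For context, the guard in the hunk above can be read in isolation as follows. This is a minimal standalone sketch, not the hercules code itself: `reconcileTickSize` and `defaultTickSize` are hypothetical names, and the 24-hour default is an assumption standing in for `items.DefaultTicksSinceStartTickSize * time.Hour`.

```go
package main

import (
	"fmt"
	"time"
)

// defaultTickSize is an assumption standing in for
// items.DefaultTicksSinceStartTickSize * time.Hour.
const defaultTickSize = 24 * time.Hour

// reconcileTickSize reproduces the merge guard: mismatching tick sizes are an
// error, and a zero tick size (older serialized results) falls back to the default.
func reconcileTickSize(t1, t2 time.Duration) (time.Duration, error) {
	if t1 != t2 {
		return 0, fmt.Errorf("mismatching tick sizes (r1: %d, r2: %d) received", t1, t2)
	}
	if t1 == 0 {
		return defaultTickSize, nil
	}
	return t1, nil
}

func main() {
	ts, err := reconcileTickSize(0, 0) // two results serialized before tickSize existed
	fmt.Println(ts, err)               // 24h0m0s <nil>
}
```
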
@@ -683,6 +683,7 @@ func (analyser *BurndownAnalysis) MergeResults(
 				bar1.GlobalHistory, bar2.GlobalHistory,
 				bar1.granularity, bar1.sampling,
 				bar2.granularity, bar2.sampling,
+				bar1.tickSize,
 				c1, c2)
 		}()
 	}
@@ -706,6 +707,7 @@ func (analyser *BurndownAnalysis) MergeResults(
 					m1, m2,
 					bar1.granularity, bar1.sampling,
 					bar2.granularity, bar2.sampling,
+					bar1.tickSize,
 					c1, c2,
 				)
 			}(i)
@@ -755,8 +757,11 @@ func (analyser *BurndownAnalysis) MergeResults(
 	return merged
 }
 
-func (analyser *BurndownAnalysis) roundTime(unix int64, dir bool) int {
-	ticks := float64(unix) / analyser.tickSize.Seconds()
+func roundTime(t time.Time, d time.Duration, dir bool) int {
+	if !dir {
+		t = items.FloorTime(t, d)
+	}
+	ticks := float64(t.Unix()) / d.Seconds()
 	if dir {
 		return int(math.Ceil(ticks))
 	}
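
The rewritten `roundTime` above converts timestamps into tick indices: interval ends are rounded up, interval beginnings are floored to a tick boundary first. Below is a minimal self-contained sketch of that behaviour, where `floorTime` is a hypothetical stand-in for `items.FloorTime` and the final rounding-down branch (outside this hunk) is assumed.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// floorTime rounds t down to a multiple of d measured from the Unix epoch;
// it is a hypothetical stand-in for items.FloorTime.
func floorTime(t time.Time, d time.Duration) time.Time {
	secs := int64(d.Seconds())
	return time.Unix(t.Unix()/secs*secs, 0).UTC()
}

// roundTick mirrors the structure of the new roundTime: interval ends are
// rounded up (dir == true); beginnings are floored to a tick boundary and
// then rounded down.
func roundTick(t time.Time, d time.Duration, dir bool) int {
	if !dir {
		t = floorTime(t, d)
	}
	ticks := float64(t.Unix()) / d.Seconds()
	if dir {
		return int(math.Ceil(ticks))
	}
	return int(math.Floor(ticks))
}

func main() {
	tick := 24 * time.Hour
	begin := time.Date(2019, 3, 1, 15, 0, 0, 0, time.UTC)
	end := time.Date(2019, 3, 3, 9, 0, 0, 0, time.UTC)
	// A 42-hour span still covers three calendar ticks: Mar 1, 2 and 3.
	fmt.Println(roundTick(end, tick, true) - roundTick(begin, tick, false)) // 3
}
```
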
@@ -766,7 +771,8 @@ func (analyser *BurndownAnalysis) roundTime(unix int64, dir bool) int {
 // mergeMatrices takes two [number of samples][number of bands] matrices,
 // resamples them to ticks so that they become square, sums and resamples back to the
 // least of (sampling1, sampling2) and (granularity1, granularity2).
-func (analyser *BurndownAnalysis) mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int,
+func (analyser *BurndownAnalysis) mergeMatrices(
+	m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int, tickSize time.Duration,
 	c1, c2 *core.CommonAnalysisResult) DenseHistory {
 	commonMerged := c1.Copy()
 	commonMerged.Merge(c2)
@@ -783,19 +789,19 @@ func (analyser *BurndownAnalysis) mergeMatrices(m1, m2 DenseHistory, granularity
 		granularity = granularity2
 	}
 
-	size := analyser.roundTime(commonMerged.EndTime, true) -
-		analyser.roundTime(commonMerged.BeginTime, false)
+	size := roundTime(commonMerged.EndTimeAsTime(), tickSize, true) -
+		roundTime(commonMerged.BeginTimeAsTime(), tickSize, false)
 	perTick := make([][]float32, size+granularity)
 	for i := range perTick {
 		perTick[i] = make([]float32, size+sampling)
 	}
 	if len(m1) > 0 {
 		addBurndownMatrix(m1, granularity1, sampling1, perTick,
-			analyser.roundTime(c1.BeginTime, false)-analyser.roundTime(commonMerged.BeginTime, false))
+			roundTime(c1.BeginTimeAsTime(), tickSize, false)-roundTime(commonMerged.BeginTimeAsTime(), tickSize, false))
 	}
 	if len(m2) > 0 {
 		addBurndownMatrix(m2, granularity2, sampling2, perTick,
-			analyser.roundTime(c2.BeginTime, false)-analyser.roundTime(commonMerged.BeginTime, false))
+			roundTime(c2.BeginTimeAsTime(), tickSize, false)-roundTime(commonMerged.BeginTimeAsTime(), tickSize, false))
 	}
 
 	// convert daily to [][]int64
@@ -992,7 +998,7 @@ func addBurndownMatrix(matrix DenseHistory, granularity, sampling int, accPerTic
 func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
 	fmt.Fprintln(writer, "  granularity:", result.granularity)
 	fmt.Fprintln(writer, "  sampling:", result.sampling)
-	fmt.Fprintln(writer, "  tick_size:", result.TickSize)
+	fmt.Fprintln(writer, "  tick_size:", int(result.tickSize.Seconds()))
 	yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
 	if len(result.FileHistories) > 0 {
 		fmt.Fprintln(writer, "  files:")
@@ -1045,7 +1051,7 @@ func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer
 	message := pb.BurndownAnalysisResults{
 		Granularity: int32(result.granularity),
 		Sampling:    int32(result.sampling),
-		TickSize:    int64(result.TickSize),
+		TickSize:    int64(result.tickSize),
 	}
 	if len(result.GlobalHistory) > 0 {
 		message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")
message .Project = pb .ToBurndownSparseMatrix (result .GlobalHistory , "project" )
0 commit comments