Skip to content

Commit 154f0a0

Browse files
authored
Merge pull request #279 from vmarkovtsev/master
Ticks and --devs
2 parents 5c9c6c1 + cd51dbd commit 154f0a0

File tree

11 files changed

+521
-960
lines changed

11 files changed

+521
-960
lines changed

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ internal/pb/pb.pb.go: internal/pb/pb.proto ${GOPATH}/bin/protoc-gen-gogo.exe
2525
endif
2626

2727
python/labours/pb_pb2.py: internal/pb/pb.proto
28-
protoc --python_out python/hercules --proto_path=internal/pb internal/pb/pb.proto
28+
protoc --python_out python/labours --proto_path=internal/pb internal/pb/pb.proto
2929

3030
cmd/hercules/plugin_template_source.go: cmd/hercules/plugin.template
3131
cd cmd/hercules && go generate

cmd/hercules/combine.go

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,10 @@ var combineCmd = &cobra.Command{
5959
bar.Increment()
6060
anotherResults, anotherMetadata, errs := loadMessage(fileName, &repos)
6161
if anotherMetadata != nil {
62-
mergeResults(mergedResults, mergedMetadata, anotherResults, anotherMetadata, only)
62+
mergeErrs := mergeResults(mergedResults, mergedMetadata, anotherResults, anotherMetadata, only)
63+
for _, err := range mergeErrs {
64+
errs = append(errs, err.Error())
65+
}
6366
}
6467
allErrors[fileName] = errs
6568
debug.FreeOSMemory()
@@ -173,7 +176,8 @@ func mergeResults(mergedResults map[string]interface{},
173176
mergedCommons *hercules.CommonAnalysisResult,
174177
anotherResults map[string]interface{},
175178
anotherCommons *hercules.CommonAnalysisResult,
176-
only string) {
179+
only string) []error {
180+
var errors []error
177181
for key, val := range anotherResults {
178182
if only != "" && key != only {
179183
continue
@@ -185,13 +189,18 @@ func mergeResults(mergedResults map[string]interface{},
185189
}
186190
item := hercules.Registry.Summon(key)[0].(hercules.ResultMergeablePipelineItem)
187191
mergedResult = item.MergeResults(mergedResult, val, mergedCommons, anotherCommons)
188-
mergedResults[key] = mergedResult
192+
if err, isErr := mergedResult.(error); isErr {
193+
errors = append(errors, fmt.Errorf("could not merge %s: %v", item.Name(), err))
194+
} else {
195+
mergedResults[key] = mergedResult
196+
}
189197
}
190198
if mergedCommons.CommitsNumber == 0 {
191199
*mergedCommons = *anotherCommons
192200
} else {
193201
mergedCommons.Merge(anotherCommons)
194202
}
203+
return errors
195204
}
196205

197206
func getOptionsString() string {

internal/pb/pb.pb.go

Lines changed: 325 additions & 886 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

internal/pb/pb.proto

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,7 +136,10 @@ message TickDevs {
136136

137137
message DevsAnalysisResults {
    // per-tick developer statistics, keyed by tick index
    map<int32, TickDevs> ticks = 1;
    // developer identities; the indexes correspond to TickDevs' keys
    repeated string dev_index = 2;
    // how long each tick is, as an int64 nanosecond count (Go's time.Duration);
    // NOTE(review): field number jumps from 2 to 8 — presumably 3-7 are reserved
    // for other analyses, verify against the rest of pb.proto
    int64 tick_size = 8;
}
141144

142145
message Sentiment {

internal/plumbing/ticks.go

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -123,11 +123,12 @@ func (ticks *TicksSinceStart) Consume(deps map[string]interface{}) (map[string]i
123123
if index == 0 {
124124
// first iteration - initialize the file objects from the tree
125125
// our precision is 1 day
126-
*ticks.tick0 = commit.Committer.When
127-
if ticks.tick0.Unix() < 631152000 { // 01.01.1990, that was 30 years ago
126+
tick0 := commit.Committer.When
127+
if tick0.Unix() < 631152000 { // 01.01.1990, that was 30 years ago
128128
ticks.l.Warnf("suspicious committer timestamp in %s > %s: %d",
129-
ticks.remote, commit.Hash.String(), ticks.tick0.Unix())
129+
ticks.remote, commit.Hash.String(), tick0.Unix())
130130
}
131+
*ticks.tick0 = FloorTime(tick0, ticks.TickSize)
131132
}
132133

133134
tick := int(commit.Committer.When.Sub(*ticks.tick0) / ticks.TickSize)
@@ -163,6 +164,16 @@ func (ticks *TicksSinceStart) Fork(n int) []core.PipelineItem {
163164
return core.ForkCopyPipelineItem(ticks, n)
164165
}
165166

167+
// FloorTime is the missing implementation of time.Time.Floor() - round to the nearest less than or equal.
168+
func FloorTime(t time.Time, d time.Duration) time.Time {
169+
// We have check if the regular rounding resulted in Floor() + d.
170+
result := t.Round(d)
171+
if result.After(t) {
172+
result = result.Add(-d)
173+
}
174+
return result
175+
}
176+
166177
func init() {
167178
core.Registry.Register(&TicksSinceStart{})
168179
}

internal/plumbing/ticks_test.go

Lines changed: 20 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ func TestTicksSinceStartRegistration(t *testing.T) {
4949

5050
func TestTicksSinceStartConsume(t *testing.T) {
5151
tss := fixtureTicksSinceStart()
52+
tss.TickSize = time.Second
5253
deps := map[string]interface{}{}
5354
commit, _ := test.Repository.CommitObject(plumbing.NewHash(
5455
"cce947b98a050c6d356bc6ba95030254914027b1"))
@@ -58,10 +59,25 @@ func TestTicksSinceStartConsume(t *testing.T) {
5859
assert.Nil(t, err)
5960
assert.Equal(t, 0, res[DependencyTick].(int))
6061
assert.Equal(t, 0, tss.previousTick)
62+
assert.Equal(t, 2016, tss.tick0.Year())
63+
assert.Equal(t, time.Month(12), tss.tick0.Month())
64+
assert.Equal(t, 12, tss.tick0.Day())
6165
assert.Equal(t, 18, tss.tick0.Hour()) // 18 UTC+1
6266
assert.Equal(t, 30, tss.tick0.Minute()) // 30
6367
assert.Equal(t, 29, tss.tick0.Second()) // 29
6468

69+
tss = fixtureTicksSinceStart()
70+
res, err = tss.Consume(deps)
71+
assert.Nil(t, err)
72+
assert.Equal(t, 0, res[DependencyTick].(int))
73+
assert.Equal(t, 0, tss.previousTick)
74+
assert.Equal(t, 2016, tss.tick0.Year())
75+
assert.Equal(t, time.Month(12), tss.tick0.Month())
76+
assert.Equal(t, 12, tss.tick0.Day())
77+
assert.Equal(t, 1, tss.tick0.Hour()) // UTC+1
78+
assert.Equal(t, 0, tss.tick0.Minute())
79+
assert.Equal(t, 0, tss.tick0.Second())
80+
6581
commit, _ = test.Repository.CommitObject(plumbing.NewHash(
6682
"fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"))
6783
deps[core.DependencyCommit] = commit
@@ -123,9 +139,9 @@ func TestTicksSinceStartConsumeWithTickSize(t *testing.T) {
123139
assert.Nil(t, err)
124140
assert.Equal(t, 0, res[DependencyTick].(int))
125141
assert.Equal(t, 0, tss.previousTick)
126-
assert.Equal(t, 18, tss.tick0.Hour()) // 18 UTC+1
127-
assert.Equal(t, 30, tss.tick0.Minute()) // 30
128-
assert.Equal(t, 29, tss.tick0.Second()) // 29
142+
assert.Equal(t, 18, tss.tick0.Hour()) // 18 UTC+1
143+
assert.Equal(t, 0, tss.tick0.Minute()) // 30
144+
assert.Equal(t, 0, tss.tick0.Second()) // 29
129145

130146
commit, _ = test.Repository.CommitObject(plumbing.NewHash(
131147
"fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"))
@@ -160,7 +176,7 @@ func TestTicksCommits(t *testing.T) {
160176
tss.commits[0] = []plumbing.Hash{plumbing.NewHash(
161177
"cce947b98a050c6d356bc6ba95030254914027b1")}
162178
commits := tss.commits
163-
tss.Initialize(test.Repository)
179+
assert.NoError(t, tss.Initialize(test.Repository))
164180
assert.Len(t, tss.commits, 0)
165181
assert.Equal(t, tss.commits, commits)
166182
}

leaves/burndown.go

Lines changed: 23 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -133,15 +133,15 @@ type BurndownResult struct {
133133
// The rest of the elements are equal the number of line removals by the corresponding
134134
// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
135135
PeopleMatrix DenseHistory
136-
// The size of each tick.
137-
TickSize time.Duration
138136

139137
// The following members are private.
140138

141139
// reversedPeopleDict is borrowed from IdentityDetector and becomes available after
142140
// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
143141
// facts[FactIdentityDetectorReversedPeopleDict].
144142
reversedPeopleDict []string
143+
// tickSize references TicksSinceStart.tickSize
144+
tickSize time.Duration
145145
// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
146146
// such as merging several results together.
147147
sampling int
@@ -571,7 +571,7 @@ func (analyser *BurndownAnalysis) Finalize() interface{} {
571571
FileOwnership: fileOwnership,
572572
PeopleHistories: peopleHistories,
573573
PeopleMatrix: peopleMatrix,
574-
TickSize: analyser.tickSize,
574+
tickSize: analyser.tickSize,
575575
reversedPeopleDict: analyser.reversedPeopleDict,
576576
sampling: analyser.Sampling,
577577
granularity: analyser.Granularity,
@@ -613,7 +613,7 @@ func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, er
613613
GlobalHistory: convertCSR(msg.Project),
614614
FileHistories: map[string]DenseHistory{},
615615
FileOwnership: map[string]map[int]int{},
616-
TickSize: time.Duration(msg.GetTickSize()),
616+
tickSize: time.Duration(msg.TickSize),
617617

618618
granularity: int(msg.Granularity),
619619
sampling: int(msg.Sampling),
@@ -649,17 +649,17 @@ func (analyser *BurndownAnalysis) MergeResults(
649649
r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
650650
bar1 := r1.(BurndownResult)
651651
bar2 := r2.(BurndownResult)
652-
if bar1.TickSize != bar2.TickSize {
652+
if bar1.tickSize != bar2.tickSize {
653653
return fmt.Errorf("mismatching tick sizes (r1: %d, r2: %d) received",
654-
bar1.TickSize, bar2.TickSize)
654+
bar1.tickSize, bar2.tickSize)
655655
}
656656
// for backwards-compatibility, if no tick size is present set to default
657-
analyser.tickSize = bar1.TickSize
657+
analyser.tickSize = bar1.tickSize
658658
if analyser.tickSize == 0 {
659659
analyser.tickSize = items.DefaultTicksSinceStartTickSize * time.Hour
660660
}
661661
merged := BurndownResult{
662-
TickSize: analyser.tickSize,
662+
tickSize: analyser.tickSize,
663663
}
664664
if bar1.sampling < bar2.sampling {
665665
merged.sampling = bar1.sampling
@@ -683,6 +683,7 @@ func (analyser *BurndownAnalysis) MergeResults(
683683
bar1.GlobalHistory, bar2.GlobalHistory,
684684
bar1.granularity, bar1.sampling,
685685
bar2.granularity, bar2.sampling,
686+
bar1.tickSize,
686687
c1, c2)
687688
}()
688689
}
@@ -706,6 +707,7 @@ func (analyser *BurndownAnalysis) MergeResults(
706707
m1, m2,
707708
bar1.granularity, bar1.sampling,
708709
bar2.granularity, bar2.sampling,
710+
bar1.tickSize,
709711
c1, c2,
710712
)
711713
}(i)
@@ -755,8 +757,11 @@ func (analyser *BurndownAnalysis) MergeResults(
755757
return merged
756758
}
757759

758-
func (analyser *BurndownAnalysis) roundTime(unix int64, dir bool) int {
759-
ticks := float64(unix) / analyser.tickSize.Seconds()
760+
func roundTime(t time.Time, d time.Duration, dir bool) int {
761+
if !dir {
762+
t = items.FloorTime(t, d)
763+
}
764+
ticks := float64(t.Unix()) / d.Seconds()
760765
if dir {
761766
return int(math.Ceil(ticks))
762767
}
@@ -766,7 +771,8 @@ func (analyser *BurndownAnalysis) roundTime(unix int64, dir bool) int {
766771
// mergeMatrices takes two [number of samples][number of bands] matrices,
767772
// resamples them to ticks so that they become square, sums and resamples back to the
768773
// least of (sampling1, sampling2) and (granularity1, granularity2).
769-
func (analyser *BurndownAnalysis) mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int,
774+
func (analyser *BurndownAnalysis) mergeMatrices(
775+
m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int, tickSize time.Duration,
770776
c1, c2 *core.CommonAnalysisResult) DenseHistory {
771777
commonMerged := c1.Copy()
772778
commonMerged.Merge(c2)
@@ -783,19 +789,19 @@ func (analyser *BurndownAnalysis) mergeMatrices(m1, m2 DenseHistory, granularity
783789
granularity = granularity2
784790
}
785791

786-
size := analyser.roundTime(commonMerged.EndTime, true) -
787-
analyser.roundTime(commonMerged.BeginTime, false)
792+
size := roundTime(commonMerged.EndTimeAsTime(), tickSize, true) -
793+
roundTime(commonMerged.BeginTimeAsTime(), tickSize, false)
788794
perTick := make([][]float32, size+granularity)
789795
for i := range perTick {
790796
perTick[i] = make([]float32, size+sampling)
791797
}
792798
if len(m1) > 0 {
793799
addBurndownMatrix(m1, granularity1, sampling1, perTick,
794-
analyser.roundTime(c1.BeginTime, false)-analyser.roundTime(commonMerged.BeginTime, false))
800+
roundTime(c1.BeginTimeAsTime(), tickSize, false)-roundTime(commonMerged.BeginTimeAsTime(), tickSize, false))
795801
}
796802
if len(m2) > 0 {
797803
addBurndownMatrix(m2, granularity2, sampling2, perTick,
798-
analyser.roundTime(c2.BeginTime, false)-analyser.roundTime(commonMerged.BeginTime, false))
804+
roundTime(c2.BeginTimeAsTime(), tickSize, false)-roundTime(commonMerged.BeginTimeAsTime(), tickSize, false))
799805
}
800806

801807
// convert daily to [][]int64
@@ -992,7 +998,7 @@ func addBurndownMatrix(matrix DenseHistory, granularity, sampling int, accPerTic
992998
func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
993999
fmt.Fprintln(writer, " granularity:", result.granularity)
9941000
fmt.Fprintln(writer, " sampling:", result.sampling)
995-
fmt.Fprintln(writer, " tick_size:", result.TickSize)
1001+
fmt.Fprintln(writer, " tick_size:", int(result.tickSize.Seconds()))
9961002
yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
9971003
if len(result.FileHistories) > 0 {
9981004
fmt.Fprintln(writer, " files:")
@@ -1045,7 +1051,7 @@ func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer
10451051
message := pb.BurndownAnalysisResults{
10461052
Granularity: int32(result.granularity),
10471053
Sampling: int32(result.sampling),
1048-
TickSize: int64(result.TickSize),
1054+
TickSize: int64(result.tickSize),
10491055
}
10501056
if len(result.GlobalHistory) > 0 {
10511057
message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")

0 commit comments

Comments
 (0)