@@ -73,21 +73,6 @@
 TEST_LOG_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).log
 TEST_OK_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).ok
 
-TEST_RATCHET_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4)-metrics.json
-TEST_RATCHET_NOISE_PERCENT=10.0
-
-# Whether to ratchet or merely save benchmarks
-ifdef CFG_RATCHET_BENCH
-CRATE_TEST_EXTRA_ARGS= \
-	--test $(TEST_BENCH) \
-	--ratchet-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) \
-	--ratchet-noise-percent $(TEST_RATCHET_NOISE_PERCENT)
-else
-CRATE_TEST_EXTRA_ARGS= \
-	--test $(TEST_BENCH) \
-	--save-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4))
-endif
-
 # If we're sharding the testsuite between parallel testers,
 # pass this argument along to the compiletest and crate test
 # invocations.
@@ -454,7 +439,6 @@ $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \
 	$$(Q)touch tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).log
 	$$(Q)$(CFG_ADB) pull $(CFG_ADB_TEST_DIR)/check-stage$(1)-T-$(2)-H-$(3)-$(4).log tmp/
 	$$(Q)$(CFG_ADB) shell rm $(CFG_ADB_TEST_DIR)/check-stage$(1)-T-$(2)-H-$(3)-$(4).log
-	$$(Q)$(CFG_ADB) pull $(CFG_ADB_TEST_DIR)/$$(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) tmp/
 	@if grep -q "result: ok" tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).tmp; \
 	then \
 		rm tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).tmp; \
@@ -696,7 +680,6 @@ CTEST_ARGS$(1)-T-$(2)-H-$(3)-$(4) := \
 	$$(CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3)) \
 	--src-base $$(S)src/test/$$(CTEST_SRC_BASE_$(4))/ \
 	--build-base $(3)/test/$$(CTEST_BUILD_BASE_$(4))/ \
-	--ratchet-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) \
 	--mode $$(CTEST_MODE_$(4)) \
 	$$(CTEST_RUNTOOL_$(4))
 