Skip to content

High Resolution Metrics Support #96

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 13 commits into from
Jan 23, 2023
15 changes: 12 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,8 @@ from aws_embedded_metrics import metric_scope
@metric_scope
def my_handler(metrics):
metrics.put_dimensions({"Foo": "Bar"})
metrics.put_metric("ProcessingLatency", 100, "Milliseconds")
metrics.put_metric("ProcessingLatency", 100, "Milliseconds", StorageResolution.STANDARD)
metrics.put_metric("Memory.HeapUsed", 1600424.0, "Bytes", StorageResolution.HIGH)
metrics.set_property("AccountId", "123456789012")
metrics.set_property("RequestId", "422b1569-16f6-4a03")
metrics.set_property("DeviceId", "61270781-c6ac-46f1")
Expand All @@ -53,9 +54,9 @@ def my_handler(metrics):

The `MetricsLogger` is the interface you will use to publish embedded metrics.

- **put_metric**(key: str, value: float, unit: str = "None") -> MetricsLogger
- **put_metric**(key: str, value: float, unit: str = "None", storage_resolution: int = 60) -> MetricsLogger

Adds a new metric to the current logger context. Multiple metrics using the same key will be appended to an array of values. The Embedded Metric Format supports a maximum of 100 values per key. If more metric values are added than are supported by the format, the logger will be flushed to allow for new metric values to be captured.
Adds a new metric to the current logger context. Multiple metrics using the same key will be appended to an array of values. Multiple metrics cannot have the same key with different storage resolutions. The Embedded Metric Format supports a maximum of 100 values per key. If more metric values are added than are supported by the format, the logger will be flushed to allow for new metric values to be captured.

Requirements:

Expand All @@ -64,10 +65,18 @@ Requirements:
- Values must be in the range of 8.515920e-109 to 1.174271e+108. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.
- Metrics must meet CloudWatch Metrics requirements, otherwise a `InvalidMetricError` will be thrown. See [MetricDatum](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html) for valid values.

- ##### Storage Resolution
An OPTIONAL value representing the storage resolution for the corresponding metric. Setting this to `High` specifies this metric as a high-resolution metric, so that CloudWatch stores the metric with sub-minute resolution down to one second. Setting this to `Standard` specifies this metric as a standard-resolution metric, which CloudWatch stores at 1-minute resolution. If a value is not provided, then a default value of `Standard` is assumed. See [CloudWatch High-Resolution metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html#high-resolution-metrics).

Examples:

```py
# Standard Resolution example
put_metric("Latency", 200, "Milliseconds")
put_metric("Latency", 201, "Milliseconds", StorageResolution.STANDARD)

# High Resolution example
put_metric("Memory.HeapUsed", 1600424.0, "Bytes", StorageResolution.HIGH)
```

- **set_property**(key: str, value: Any) -> MetricsLogger
Expand Down
4 changes: 3 additions & 1 deletion aws_embedded_metrics/logger/metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aws_embedded_metrics.storage_resolution import StorageResolution


class Metric(object):
def __init__(self, value: float, unit: str = None):
def __init__(self, value: float, unit: str = None, storage_resolution: StorageResolution = StorageResolution.STANDARD):
self.values = [value]
self.unit = unit or "None"
self.storage_resolution = storage_resolution or StorageResolution.STANDARD

def add_value(self, value: float) -> None:
self.values.append(value)
9 changes: 6 additions & 3 deletions aws_embedded_metrics/logger/metrics_context.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from aws_embedded_metrics.config import get_config
from aws_embedded_metrics.logger.metric import Metric
from aws_embedded_metrics.validator import validate_dimension_set, validate_metric
from aws_embedded_metrics.storage_resolution import StorageResolution
from typing import List, Dict, Any, Set


Expand All @@ -39,8 +40,9 @@ def __init__(
self.metrics: Dict[str, Metric] = {}
self.should_use_default_dimensions = True
self.meta: Dict[str, Any] = {"Timestamp": utils.now()}
self.metric_name_and_resolution_map: Dict[str, StorageResolution] = {}

def put_metric(self, key: str, value: float, unit: str = None) -> None:
def put_metric(self, key: str, value: float, unit: str = None, storage_resolution: StorageResolution = StorageResolution.STANDARD) -> None:
"""
Adds a metric measurement to the context.
Multiple calls using the same key will be stored as an
Expand All @@ -49,13 +51,14 @@ def put_metric(self, key: str, value: float, unit: str = None) -> None:
context.put_metric("Latency", 100, "Milliseconds")
```
"""
validate_metric(key, value, unit)
validate_metric(key, value, unit, storage_resolution, self.metric_name_and_resolution_map)
metric = self.metrics.get(key)
if metric:
# TODO: we should log a warning if the unit has been changed
metric.add_value(value)
else:
self.metrics[key] = Metric(value, unit)
self.metrics[key] = Metric(value, unit, storage_resolution)
self.metric_name_and_resolution_map[key] = storage_resolution

def put_dimensions(self, dimension_set: Dict[str, str]) -> None:
"""
Expand Down
7 changes: 5 additions & 2 deletions aws_embedded_metrics/logger/metrics_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
from aws_embedded_metrics.logger.metrics_context import MetricsContext
from aws_embedded_metrics.validator import validate_namespace
from aws_embedded_metrics.config import get_config
from aws_embedded_metrics.storage_resolution import StorageResolution
from typing import Any, Awaitable, Callable, Dict, Tuple
import sys
import traceback
Expand Down Expand Up @@ -78,8 +79,10 @@ def set_namespace(self, namespace: str) -> "MetricsLogger":
self.context.namespace = namespace
return self

def put_metric(self, key: str, value: float, unit: str = "None") -> "MetricsLogger":
self.context.put_metric(key, value, unit)
def put_metric(
self, key: str, value: float, unit: str = "None", storage_resolution: StorageResolution = StorageResolution.STANDARD
) -> "MetricsLogger":
self.context.put_metric(key, value, unit, storage_resolution)
return self

def add_stack_trace(self, key: str, details: Any = None, exc_info: Tuple = None) -> "MetricsLogger":
Expand Down
6 changes: 5 additions & 1 deletion aws_embedded_metrics/serializers/log_serializer.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
MAX_DIMENSION_SET_SIZE, MAX_METRICS_PER_EVENT, MAX_DATAPOINTS_PER_METRIC
)
from aws_embedded_metrics.exceptions import DimensionSetExceededError
from aws_embedded_metrics.storage_resolution import StorageResolution
import json
from typing import Any, Dict, List

Expand Down Expand Up @@ -87,8 +88,11 @@ def create_body() -> Dict[str, Any]:
if len(metric.values) > end_index:
remaining_data = True

metric_body = {"Name": metric_name, "Unit": metric.unit}
if metric.storage_resolution == StorageResolution.HIGH:
metric_body["StorageResolution"] = metric.storage_resolution.value # type: ignore
if not config.disable_metric_extraction:
current_body["_aws"]["CloudWatchMetrics"][0]["Metrics"].append({"Name": metric_name, "Unit": metric.unit})
current_body["_aws"]["CloudWatchMetrics"][0]["Metrics"].append(metric_body)
num_metrics_in_current_body += 1

if (num_metrics_in_current_body == MAX_METRICS_PER_EVENT):
Expand Down
16 changes: 16 additions & 0 deletions aws_embedded_metrics/storage_resolution.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from enum import Enum, EnumMeta


class StorageResolutionMeta(EnumMeta):
    """Metaclass that lets ``in`` checks accept raw member values.

    With this metaclass, ``value in StorageResolution`` is True when
    ``value`` is an enum member or can be coerced into one via the enum
    constructor, instead of raising for non-member inputs.
    """

    def __contains__(self, item: object) -> bool:
        # The enum constructor raises ValueError for unknown values and
        # TypeError for invalid/unhashable inputs; treat both as "not in".
        try:
            self(item)
        except (ValueError, TypeError):
            return False
        return True


class StorageResolution(Enum, metaclass=StorageResolutionMeta):
    """CloudWatch metric storage resolution, in seconds per datapoint."""
    # Standard-resolution metric: CloudWatch stores it at 1-minute resolution.
    STANDARD = 60
    # High-resolution metric: CloudWatch stores it down to 1-second resolution.
    HIGH = 1
12 changes: 11 additions & 1 deletion aws_embedded_metrics/validator.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import re
from typing import Dict, Optional
from aws_embedded_metrics.unit import Unit
from aws_embedded_metrics.storage_resolution import StorageResolution
from aws_embedded_metrics.exceptions import DimensionSetExceededError, InvalidDimensionError, InvalidMetricError, InvalidNamespaceError
import aws_embedded_metrics.constants as constants

Expand Down Expand Up @@ -57,14 +58,16 @@ def validate_dimension_set(dimension_set: Dict[str, str]) -> None:
raise InvalidDimensionError("Dimension name cannot start with ':'")


def validate_metric(name: str, value: float, unit: Optional[str]) -> None:
def validate_metric(name: str, value: float, unit: Optional[str], storage_resolution: StorageResolution, metric_name_and_resolution_map: dict) -> None: # noqa: E501
"""
Validates a metric

Parameters:
name (str): The name of the metric
value (float): The value of the metric
unit (Optional[str]): The unit of the metric
storage_resolution (Optional[int]): The storage resolution of metric
metric_name_and_resolution_map (dict): The map used for validating metric

Raises:
InvalidMetricError: If the metric is invalid
Expand All @@ -81,6 +84,13 @@ def validate_metric(name: str, value: float, unit: Optional[str]) -> None:
if unit is not None and unit not in Unit:
raise InvalidMetricError(f"Metric unit is not valid: {unit}")

if storage_resolution is None or storage_resolution not in StorageResolution:
raise InvalidMetricError(f"Metric storage resolution is not valid: {storage_resolution}")

if name in metric_name_and_resolution_map and metric_name_and_resolution_map.get(name) is not storage_resolution:
raise InvalidMetricError(
f"Resolution for metrics {name} is already set. A single log event cannot have a metric with two different resolutions.")


def validate_namespace(namespace: str) -> None:
"""
Expand Down
2 changes: 2 additions & 0 deletions examples/ec2/app.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from aws_embedded_metrics import metric_scope
from aws_embedded_metrics.storage_resolution import StorageResolution

import logging

Expand All @@ -10,6 +11,7 @@
def my_handler(metrics):
metrics.put_dimensions({"Foo": "Bar"})
metrics.put_metric("ProcessingLatency", 100, "Milliseconds")
metrics.put_metric("CPU Utilization", 87, "Percent", StorageResolution.HIGH)
metrics.set_property("AccountId", "123456789012")
metrics.set_property("RequestId", "422b1569-16f6-4a03-b8f0-fe3fd9b100f8")
metrics.set_property("DeviceId", "61270781-c6ac-46f1-baf7-22c808af8162")
Expand Down
2 changes: 2 additions & 0 deletions examples/lambda/function.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
from aws_embedded_metrics import metric_scope
from aws_embedded_metrics.storage_resolution import StorageResolution


@metric_scope
def my_handler(event, context, metrics):
metrics.put_dimensions({"Foo": "Bar"})
metrics.put_metric("ProcessingLatency", 100, "Milliseconds")
metrics.put_metric("CPU Utilization", 87, "Percent", StorageResolution.HIGH)
metrics.set_property("AccountId", "123456789012")
metrics.set_property("RequestId", "422b1569-16f6-4a03-b8f0-fe3fd9b100f8")
metrics.set_property("DeviceId", "61270781-c6ac-46f1-baf7-22c808af8162")
Expand Down
3 changes: 2 additions & 1 deletion tests/canary/agent/canary.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import aws_embedded_metrics
from aws_embedded_metrics import metric_scope
from aws_embedded_metrics.config import get_config
from aws_embedded_metrics.storage_resolution import StorageResolution
from getversion import get_module_version
import os
import psutil
Expand All @@ -26,7 +27,7 @@ async def app(init, last_run_duration, metrics):
metrics.set_dimensions({"Runtime": 'Python', "Platform": 'ECS', "Agent": 'CloudWatchAgent', "Version": version})
metrics.put_metric('Invoke', 1, "Count")
metrics.put_metric('Duration', last_run_duration, 'Seconds')
metrics.put_metric('Memory.RSS', process.memory_info().rss, 'Bytes')
metrics.put_metric('Memory.RSS', process.memory_info().rss, 'Bytes', StorageResolution.HIGH)


async def main():
Expand Down
48 changes: 34 additions & 14 deletions tests/logger/test_metrics_context.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import random
from aws_embedded_metrics import constants
from aws_embedded_metrics.unit import Unit
from aws_embedded_metrics.storage_resolution import StorageResolution
from aws_embedded_metrics import config
from aws_embedded_metrics.logger.metrics_context import MetricsContext
from aws_embedded_metrics.constants import DEFAULT_NAMESPACE
Expand Down Expand Up @@ -263,14 +264,16 @@ def test_put_metric_adds_metrics():
metric_key = fake.word()
metric_value = fake.random.random()
metric_unit = random.choice(list(Unit)).value
metric_storage_resolution = random.choice(list(StorageResolution)).value

# act
context.put_metric(metric_key, metric_value, metric_unit)
context.put_metric(metric_key, metric_value, metric_unit, metric_storage_resolution)

# assert
metric = context.metrics[metric_key]
assert metric.unit == metric_unit
assert metric.values == [metric_value]
assert metric.storage_resolution == metric_storage_resolution


def test_put_metric_uses_none_unit_if_not_provided():
Expand All @@ -287,26 +290,43 @@ def test_put_metric_uses_none_unit_if_not_provided():
assert metric.unit == "None"


def test_put_metric_uses_standard_storage_resolution_if_not_provided():
    """put_metric with no explicit resolution defaults to STANDARD."""
    # arrange
    ctx = MetricsContext()
    key = fake.word()
    value = fake.random.random()

    # act
    ctx.put_metric(key, value)

    # assert
    stored = ctx.metrics[key]
    assert stored.storage_resolution == StorageResolution.STANDARD


@pytest.mark.parametrize(
"name, value, unit",
"name, value, unit, storage_resolution",
[
("", 1, "None"),
(" ", 1, "Seconds"),
("a" * (constants.MAX_METRIC_NAME_LENGTH + 1), 1, "None"),
("metric", float("inf"), "Count"),
("metric", float("-inf"), "Count"),
("metric", float("nan"), "Count"),
("metric", math.inf, "Seconds"),
("metric", -math.inf, "Seconds"),
("metric", math.nan, "Seconds"),
("metric", 1, "Kilometers/Fahrenheit")
("", 1, "None", StorageResolution.STANDARD),
(" ", 1, "Seconds", StorageResolution.STANDARD),
("a" * (constants.MAX_METRIC_NAME_LENGTH + 1), 1, "None", StorageResolution.STANDARD),
("metric", float("inf"), "Count", StorageResolution.STANDARD),
("metric", float("-inf"), "Count", StorageResolution.STANDARD),
("metric", float("nan"), "Count", StorageResolution.STANDARD),
("metric", math.inf, "Seconds", StorageResolution.STANDARD),
("metric", -math.inf, "Seconds", StorageResolution.STANDARD),
("metric", math.nan, "Seconds", StorageResolution.STANDARD),
("metric", 1, "Kilometers/Fahrenheit", StorageResolution.STANDARD),
("metric", 1, "Seconds", 2),
("metric", 1, "Seconds", 0),
("metric", 1, "Seconds", None)
]
)
def test_put_invalid_metric_raises_exception(name, value, unit):
def test_put_invalid_metric_raises_exception(name, value, unit, storage_resolution):
context = MetricsContext()

with pytest.raises(InvalidMetricError):
context.put_metric(name, value, unit)
context.put_metric(name, value, unit, storage_resolution)


def test_create_copy_with_context_creates_new_instance():
Expand Down
46 changes: 45 additions & 1 deletion tests/logger/test_metrics_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@
from aws_embedded_metrics.logger import metrics_logger
from aws_embedded_metrics.sinks import Sink
from aws_embedded_metrics.environment import Environment
from aws_embedded_metrics.exceptions import InvalidNamespaceError
from aws_embedded_metrics.exceptions import InvalidNamespaceError, InvalidMetricError
from aws_embedded_metrics.storage_resolution import StorageResolution
import aws_embedded_metrics.constants as constants
import pytest
from faker import Faker
Expand Down Expand Up @@ -53,6 +54,49 @@ async def test_can_put_metric(mocker):
assert context.metrics[expected_key].unit == "None"


@pytest.mark.asyncio
async def test_can_put_metric_with_different_storage_resolution_different_flush(mocker):
    """Separate flushes may each use a different storage resolution."""
    # arrange
    logger, sink, env = get_logger_and_sink(mocker)
    key = fake.word()
    value = fake.random.randrange(100)

    # act: first flush with an explicitly high-resolution metric
    logger.put_metric(key, value, None, StorageResolution.HIGH)
    await logger.flush()

    # assert
    flushed = sink.accept.call_args[0][0]
    metric = flushed.metrics[key]
    assert metric.values == [value]
    assert metric.unit == "None"
    assert metric.storage_resolution == StorageResolution.HIGH

    # act: second flush relying on the default (standard) resolution
    key = fake.word()
    value = fake.random.randrange(100)
    logger.put_metric(key, value, None)
    await logger.flush()

    # assert
    flushed = sink.accept.call_args[0][0]
    metric = flushed.metrics[key]
    assert metric.values == [value]
    assert metric.unit == "None"
    assert metric.storage_resolution == StorageResolution.STANDARD


@pytest.mark.asyncio
async def test_cannot_put_metric_with_different_storage_resolution_same_flush(mocker):
    """Reusing a key with a conflicting resolution before flushing raises."""
    # arrange
    logger, sink, env = get_logger_and_sink(mocker)
    key = fake.word()
    value = fake.random.randrange(100)

    # act / assert: same key, two resolutions, within a single flush window
    logger.put_metric(key, value, None, StorageResolution.HIGH)
    with pytest.raises(InvalidMetricError):
        logger.put_metric(key, value, None, StorageResolution.STANDARD)
    await logger.flush()


@pytest.mark.asyncio
async def test_can_add_stack_trace(mocker):
# arrange
Expand Down
Loading