| text (string, 81–477k chars) | file_path (string, 22–92 chars) | module (string, 13–87 chars) | token_count (int64, 24–94.8k) | has_source_code (bool, 1 class) |
|---|---|---|---|---|
// File: crates/analytics/src/payments/metrics/payment_count.rs
// Module: analytics::src::payments::metrics::payment_count
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentCount;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentCount
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/payment_count.rs | analytics::src::payments::metrics::payment_count | 940 | true |
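A note on the pattern above: the metric is generic over the analytics backend `T`, and every SQL fragment (`Aggregate`, `Window`, the collection name) is rendered through a per-backend `ToSql<T>` bound, so one `load_metrics` body serves ClickHouse and SQLx alike. A minimal, self-contained sketch of that dispatch with toy types (not the crate's real trait):

// Toy analog of per-backend SQL rendering: the backend is a marker type
// parameter, and each fragment implements ToSql<T> separately per backend.
trait ToSql<T> {
    fn to_sql(&self) -> String;
}

struct Clickhouse; // marker types standing in for real connection pools
struct Postgres;

enum Aggregate<F> {
    Count { field: Option<F>, alias: Option<&'static str> },
}

impl ToSql<Clickhouse> for Aggregate<&'static str> {
    fn to_sql(&self) -> String {
        match self {
            Aggregate::Count { alias, .. } => format!(
                "count(*){}",
                alias.map(|a| format!(" AS {a}")).unwrap_or_default()
            ),
        }
    }
}

impl ToSql<Postgres> for Aggregate<&'static str> {
    fn to_sql(&self) -> String {
        match self {
            // A different backend may render the same fragment differently.
            Aggregate::Count { alias, .. } => format!(
                "COUNT(*){}",
                alias.map(|a| format!(" AS \"{a}\"")).unwrap_or_default()
            ),
        }
    }
}

fn main() {
    let agg = Aggregate::Count { field: None::<&'static str>, alias: Some("count") };
    println!("{}", <Aggregate<&'static str> as ToSql<Clickhouse>>::to_sql(&agg));
    println!("{}", <Aggregate<&'static str> as ToSql<Postgres>>::to_sql(&agg));
}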
// File: crates/analytics/src/payments/metrics/payment_success_count.rs
// Module: analytics::src::payments::metrics::payment_success_count
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentSuccessCount;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentSuccessCount
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/payment_success_count.rs | analytics::src::payments::metrics::payment_success_count | 990 | true |
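For orientation, the query this builder assembles is roughly the following; this rendering is an assumption for illustration (table and placeholder names are invented here), since the real SQL comes from the backend's `ToSql` implementations and the dimensions requested at runtime:

// Assumed, illustrative shape only: count of successful attempts grouped
// by the requested dimensions (here just `currency`), keeping the observed
// min/max created_at for later time-bucket clipping.
const PAYMENT_SUCCESS_COUNT_SQL: &str = r#"
SELECT
    currency,
    count(*)        AS count,
    min(created_at) AS start_bucket,
    max(created_at) AS end_bucket
FROM payment_attempt
WHERE status = 'charged'
  AND created_at BETWEEN :start_time AND :end_time
GROUP BY currency
"#;

fn main() {
    println!("{PAYMENT_SUCCESS_COUNT_SQL}");
}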
// File: crates/analytics/src/payments/metrics/payment_processed_amount.rs
// Module: analytics::src::payments::metrics::payment_processed_amount
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentProcessedAmount;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentProcessedAmount
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/payment_processed_amount.rs | analytics::src::payments::metrics::payment_processed_amount | 1,032 | true |
// File: crates/analytics/src/payments/metrics/sessionized_metrics/failure_reasons.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::failure_reasons
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket,
ToSql, Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct FailureReasons;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for FailureReasons
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut inner_query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
inner_query_builder
.add_select_column("sum(sign_flag)")
.switch()?;
inner_query_builder
.add_custom_filter_clause(
PaymentDimensions::ErrorReason,
"NULL",
FilterTypes::IsNotNull,
)
.switch()?;
time_range
.set_filter_clause(&mut inner_query_builder)
.attach_printable("Error filtering time range for inner query")
.switch()?;
let inner_query_string = inner_query_builder
.build_query()
.attach_printable("Error building inner query")
.change_context(MetricsError::QueryBuildingError)?;
let mut outer_query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
outer_query_builder.add_select_column(dim).switch()?;
}
outer_query_builder
.add_select_column("sum(sign_flag) AS count")
.switch()?;
outer_query_builder
.add_select_column(format!("({inner_query_string}) AS total"))
.switch()?;
outer_query_builder
.add_select_column("first_attempt")
.switch()?;
outer_query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
outer_query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters
.set_filter_clause(&mut outer_query_builder)
.switch()?;
auth.set_filter_clause(&mut outer_query_builder).switch()?;
time_range
.set_filter_clause(&mut outer_query_builder)
.attach_printable("Error filtering time range for outer query")
.switch()?;
outer_query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Failure,
)
.switch()?;
outer_query_builder
.add_custom_filter_clause(
PaymentDimensions::ErrorReason,
"NULL",
FilterTypes::IsNotNull,
)
.switch()?;
for dim in dimensions.iter() {
outer_query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
outer_query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut outer_query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
outer_query_builder
.add_order_by_clause("count", Order::Descending)
.attach_printable("Error adding order by clause")
.switch()?;
let filtered_dimensions: Vec<&PaymentDimensions> = dimensions
.iter()
.filter(|&&dim| dim != PaymentDimensions::ErrorReason)
.collect();
for dim in &filtered_dimensions {
outer_query_builder
.add_order_by_clause(*dim, Order::Ascending)
.attach_printable("Error adding order by clause")
.switch()?;
}
outer_query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/failure_reasons.rs | analytics::src::payments::metrics::sessionized_metrics::failure_reasons | 1,402 | true |
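Both the inner and outer queries above aggregate `sum(sign_flag)` instead of `count(*)`. A plausible reading, stated here as an assumption (ClickHouse CollapsingMergeTree style), is that the sessionized tables store +1/-1 row pairs on update, so the signed sum yields the net live-row count even before background merges collapse the pairs:

// Assumed storage model: each logical update inserts a cancel row (-1)
// for the old state plus a fresh row (+1), so sum(sign_flag) = live rows.
fn main() {
    // (attempt_id, sign_flag): attempt "a2" was rewritten once.
    let rows = [("a1", 1i64), ("a2", 1), ("a2", -1), ("a2", 1), ("a3", 1)];
    let net: i64 = rows.iter().map(|(_, sign)| sign).sum();
    assert_eq!(net, 3); // three live attempts despite five stored rows
    println!("net rows = {net}");
}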
// File: crates/analytics/src/payments/metrics/sessionized_metrics/connector_success_rate.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::connector_success_rate
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct ConnectorSuccessRate;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for ConnectorSuccessRate
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause(PaymentDimensions::Connector, "NULL", FilterTypes::IsNotNull)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/connector_success_rate.rs | analytics::src::payments::metrics::sessionized_metrics::connector_success_rate | 1,012 | true |
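Note that this metric appends `PaymentDimensions::PaymentStatus` to the dimension list, so the query returns one count per (dimension, status) pair; the rate itself is presumably derived downstream. A self-contained sketch of that derivation with hypothetical data (not crate code):

// Fold per-status counts into (charged, total) per connector, then divide.
use std::collections::HashMap;

fn main() {
    // (connector, status) -> count, as the grouped query might return it.
    let rows = [
        (("stripe", "charged"), 90_u64),
        (("stripe", "failure"), 10),
        (("adyen", "charged"), 45),
        (("adyen", "failure"), 5),
    ];
    let mut totals: HashMap<&str, (u64, u64)> = HashMap::new();
    for ((connector, status), n) in rows {
        let entry = totals.entry(connector).or_default();
        if status == "charged" {
            entry.0 += n;
        }
        entry.1 += n;
    }
    for (connector, (charged, total)) in &totals {
        println!("{connector}: {:.1}% success", 100.0 * *charged as f64 / *total as f64);
    }
}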
// File: crates/analytics/src/payments/metrics/sessionized_metrics/retries_count.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::retries_count
use std::collections::HashSet;
use api_models::{
analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
},
enums::IntentStatus,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct RetriesCount;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for RetriesCount
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
_dimensions: &[PaymentDimensions],
auth: &AuthInfo,
_filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/retries_count.rs | analytics::src::payments::metrics::sessionized_metrics::retries_count | 993 | true |
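Every metric in this section ends by clipping each group's observed `start_bucket`/`end_bucket` to granularity boundaries via `clip_to_start`/`clip_to_end`. A minimal analog of the assumed semantics, using Unix seconds instead of `PrimitiveDateTime`:

// Assumed semantics: snap the group's observed min outward to the start of
// its granularity window, and the observed max to the window's end.
fn clip_to_start(ts_secs: i64, bucket_secs: i64) -> i64 {
    ts_secs - ts_secs.rem_euclid(bucket_secs)
}

fn clip_to_end(ts_secs: i64, bucket_secs: i64) -> i64 {
    clip_to_start(ts_secs, bucket_secs) + bucket_secs - 1
}

fn main() {
    let hour = 3_600;
    let t = 5 * hour + 1_234; // 05:20:34 as seconds since midnight
    assert_eq!(clip_to_start(t, hour), 5 * hour); // 05:00:00
    assert_eq!(clip_to_end(t, hour), 6 * hour - 1); // 05:59:59
    println!("bucket = [{}, {}]", clip_to_start(t, hour), clip_to_end(t, hour));
}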
// File: crates/analytics/src/payments/metrics/sessionized_metrics/payments_distribution.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::payments_distribution
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentsDistribution;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentsDistribution
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder.add_select_column("first_attempt").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/payments_distribution.rs | analytics::src::payments::metrics::sessionized_metrics::payments_distribution | 1,016 | true |
// File: crates/analytics/src/payments/metrics/sessionized_metrics/success_rate.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::success_rate
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentSuccessRate;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentSuccessRate
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/success_rate.rs | analytics::src::payments::metrics::sessionized_metrics::success_rate | 974 | true |
// File: crates/analytics/src/payments/metrics/sessionized_metrics/avg_ticket_size.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::avg_ticket_size
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::{PaymentMetric, PaymentMetricRow};
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct AvgTicketSize;
#[async_trait::async_trait]
impl<T> PaymentMetric<T> for AvgTicketSize
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/avg_ticket_size.rs | analytics::src::payments::metrics::sessionized_metrics::avg_ticket_size | 1,048 | true |
// File: crates/analytics/src/payments/metrics/sessionized_metrics/debit_routing.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::debit_routing
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct DebitRouting;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for DebitRouting
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "debit_routing_savings",
alias: Some("total"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/debit_routing.rs | analytics::src::payments::metrics::sessionized_metrics::debit_routing | 1,077 | true |
// File: crates/analytics/src/payments/metrics/sessionized_metrics/payment_count.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::payment_count
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentCount;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentCount
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/payment_count.rs | analytics::src::payments::metrics::sessionized_metrics::payment_count | 950 | true |
// File: crates/analytics/src/payments/metrics/sessionized_metrics/payment_success_count.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::payment_success_count
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentSuccessCount;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentSuccessCount
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/payment_success_count.rs | analytics::src::payments::metrics::sessionized_metrics::payment_success_count | 1,000 | true |
// File: crates/analytics/src/payments/metrics/sessionized_metrics/payment_processed_amount.rs
// Module: analytics::src::payments::metrics::sessionized_metrics::payment_processed_amount
use std::collections::HashSet;
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentProcessedAmount;
#[async_trait::async_trait]
impl<T> super::PaymentMetric<T> for PaymentProcessedAmount
where
T: AnalyticsDataSource + super::PaymentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentDimensions::PaymentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder.add_select_column("first_attempt").switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Charged,
)
.switch()?;
query_builder
.execute_query::<PaymentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
None,
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentMetricsBucketIdentifier, PaymentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/metrics/sessionized_metrics/payment_processed_amount.rs | analytics::src::payments::metrics::sessionized_metrics::payment_processed_amount | 1,140 | true |
// File: crates/analytics/src/payments/distribution/payment_error_message.rs
// Module: analytics::src::payments::distribution::payment_error_message
use api_models::analytics::{
payments::{PaymentDimensions, PaymentFilters, PaymentMetricsBucketIdentifier},
Granularity, PaymentDistributionBody, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::{PaymentDistribution, PaymentDistributionRow};
use crate::{
enums::AuthInfo,
query::{
Aggregate, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentErrorMessage;
#[async_trait::async_trait]
impl<T> PaymentDistribution<T> for PaymentErrorMessage
where
T: AnalyticsDataSource + super::PaymentDistributionAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_distribution(
&self,
distribution: &PaymentDistributionBody,
dimensions: &[PaymentDimensions],
auth: &AuthInfo,
filters: &PaymentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Payment);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(&distribution.distribution_for)
.switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause(&distribution.distribution_for)
.attach_printable("Error grouping by distribution_for")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentDimensions::PaymentStatus,
storage_enums::AttemptStatus::Failure,
)
.switch()?;
for dim in dimensions.iter() {
query_builder.add_outer_select_column(dim).switch()?;
}
query_builder
.add_outer_select_column(&distribution.distribution_for)
.switch()?;
query_builder.add_outer_select_column("count").switch()?;
query_builder
.add_outer_select_column("start_bucket")
.switch()?;
query_builder
.add_outer_select_column("end_bucket")
.switch()?;
let sql_dimensions = query_builder.transform_to_sql_values(dimensions).switch()?;
query_builder
.add_outer_select_column(Window::Sum {
field: "count",
partition_by: Some(sql_dimensions),
order_by: None,
alias: Some("total"),
})
.switch()?;
query_builder
.add_top_n_clause(
dimensions,
distribution.distribution_cardinality.into(),
"count",
Order::Descending,
)
.switch()?;
query_builder
.execute_query::<PaymentDistributionRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentMetricsBucketIdentifier::new(
i.currency.as_ref().map(|i| i.0),
i.status.as_ref().map(|i| i.0),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.client_source.clone(),
i.client_version.clone(),
i.profile_id.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
i.routing_approach.as_ref().map(|i| i.0.clone()),
i.signature_network.clone(),
i.is_issuer_regulated,
i.is_debit_routed,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
Vec<(PaymentMetricsBucketIdentifier, PaymentDistributionRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
| crates/analytics/src/payments/distribution/payment_error_message.rs | analytics::src::payments::distribution::payment_error_message | 1,254 | true |
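This distribution builds a two-layer query: an inner aggregate per (dimensions, error message), then an outer select that adds a window sum partitioned by the dimensions and a top-N cut on `count`. Roughly, as an assumed rendering for illustration (column and table names invented here):

// Assumed, illustrative shape of the generated two-layer query: the window
// total provides each error message's share denominator, and the top-N
// clause keeps only the most frequent messages per dimension group.
const ERROR_MESSAGE_DISTRIBUTION_SQL: &str = r#"
SELECT *
FROM (
    SELECT
        connector,
        error_message,
        count,
        start_bucket,
        end_bucket,
        sum(count) OVER (PARTITION BY connector) AS total,
        row_number() OVER (PARTITION BY connector ORDER BY count DESC) AS top_n
    FROM (
        SELECT
            connector,
            error_message,
            count(*)        AS count,
            min(created_at) AS start_bucket,
            max(created_at) AS end_bucket
        FROM payment_attempt
        WHERE status = 'failure'
          AND created_at BETWEEN :start_time AND :end_time
        GROUP BY connector, error_message
    )
)
WHERE top_n <= :distribution_cardinality
"#;

fn main() {
    println!("{ERROR_MESSAGE_DISTRIBUTION_SQL}");
}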
// File: crates/analytics/src/routing_events/core.rs
// Module: analytics::src::routing_events::core
use api_models::analytics::routing_events::RoutingEventsRequest;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use super::events::{get_routing_events, RoutingEventsResult};
use crate::{errors::AnalyticsResult, types::FiltersError, AnalyticsProvider};
pub async fn routing_events_core(
pool: &AnalyticsProvider,
req: RoutingEventsRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<Vec<RoutingEventsResult>> {
let data = match pool {
        AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
            "Routing Events not implemented for SQLX",
        ))
        .attach_printable("SQL Analytics is not implemented for Routing Events"),
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
get_routing_events(merchant_id, req, ckh_pool).await
}
}
.switch()?;
Ok(data)
}
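// A minimal, standalone sketch of the provider-dispatch pattern used above:
// the ClickHouse-backed variants share a single match arm via or-patterns,
// while the SQLx-only variant short-circuits with a NotImplemented-style
// error. The types here are simplified stand-ins for AnalyticsProvider.
enum Provider {
    Sqlx(String),
    Clickhouse(String),
    CombinedSqlx(String, String),
    CombinedCkh(String, String),
}

fn routing_events_backend(pool: &Provider) -> Result<&str, &'static str> {
    match pool {
        Provider::Sqlx(_) => Err("routing events not implemented for SQLx"),
        Provider::Clickhouse(ckh)
        | Provider::CombinedSqlx(_, ckh)
        | Provider::CombinedCkh(_, ckh) => Ok(ckh.as_str()),
    }
}

fn main() {
    let pool = Provider::CombinedCkh("postgres".into(), "clickhouse".into());
    // Both combined variants route event reads to the ClickHouse handle.
    assert_eq!(routing_events_backend(&pool), Ok("clickhouse"));
}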
// File: crates/analytics/src/routing_events/events.rs
// Module: analytics::src::routing_events::events
use api_models::analytics::{routing_events::RoutingEventsRequest, Granularity};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
};
pub trait RoutingEventLogAnalytics: LoadRow<RoutingEventsResult> {}
pub async fn get_routing_events<T>(
merchant_id: &common_utils::id_type::MerchantId,
query_param: RoutingEventsRequest,
pool: &T,
) -> FiltersResult<Vec<RoutingEventsResult>>
where
T: AnalyticsDataSource + RoutingEventLogAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::RoutingEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder
.add_filter_clause("payment_id", &query_param.payment_id)
.switch()?;
if let Some(refund_id) = query_param.refund_id {
query_builder
.add_filter_clause("refund_id", &refund_id)
.switch()?;
}
if let Some(dispute_id) = query_param.dispute_id {
query_builder
.add_filter_clause("dispute_id", &dispute_id)
.switch()?;
}
query_builder
.execute_query::<RoutingEventsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct RoutingEventsResult {
pub merchant_id: common_utils::id_type::MerchantId,
pub profile_id: common_utils::id_type::ProfileId,
pub payment_id: String,
pub routable_connectors: String,
pub payment_connector: Option<String>,
pub request_id: Option<String>,
pub flow: String,
pub url: Option<String>,
pub request: String,
pub response: Option<String>,
pub error: Option<String>,
pub status_code: Option<u16>,
#[serde(with = "common_utils::custom_serde::iso8601")]
pub created_at: PrimitiveDateTime,
pub method: String,
pub routing_engine: String,
pub routing_approach: Option<String>,
}
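// A minimal, standalone sketch of how a result row like RoutingEventsResult
// comes out of a JSON row set via serde. The struct is trimmed to a few
// fields and skips the crate's iso8601 timestamp codec; it is illustrative
// only and assumes the serde and serde_json crates.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct RoutingEventRowLite {
    payment_id: String,
    payment_connector: Option<String>,
    status_code: Option<u16>,
}

fn main() {
    let raw = r#"{"payment_id":"pay_123","payment_connector":null,"status_code":200}"#;
    let row: RoutingEventRowLite = serde_json::from_str(raw).expect("valid row");
    assert_eq!(row.payment_id, "pay_123");
    // Nullable columns land cleanly in Option fields.
    assert_eq!(row.payment_connector, None);
    assert_eq!(row.status_code, Some(200));
}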
// File: crates/analytics/src/connector_events/core.rs
// Module: analytics::src::connector_events::core
use api_models::analytics::connector_events::ConnectorEventsRequest;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use super::events::{get_connector_events, ConnectorEventsResult};
use crate::{errors::AnalyticsResult, types::FiltersError, AnalyticsProvider};
pub async fn connector_events_core(
pool: &AnalyticsProvider,
req: ConnectorEventsRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<Vec<ConnectorEventsResult>> {
let data = match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"Connector Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for Connector Events"),
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
get_connector_events(merchant_id, req, ckh_pool).await
}
}
.switch()?;
Ok(data)
}
// File: crates/analytics/src/connector_events/events.rs
// Module: analytics::src::connector_events::events
use api_models::analytics::{connector_events::ConnectorEventsRequest, Granularity};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
};
pub trait ConnectorEventLogAnalytics: LoadRow<ConnectorEventsResult> {}
pub async fn get_connector_events<T>(
merchant_id: &common_utils::id_type::MerchantId,
query_param: ConnectorEventsRequest,
pool: &T,
) -> FiltersResult<Vec<ConnectorEventsResult>>
where
T: AnalyticsDataSource + ConnectorEventLogAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::ConnectorEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder
.add_filter_clause("payment_id", &query_param.payment_id)
.switch()?;
if let Some(refund_id) = query_param.refund_id {
query_builder
.add_filter_clause("refund_id", &refund_id)
.switch()?;
}
if let Some(dispute_id) = query_param.dispute_id {
query_builder
.add_filter_clause("dispute_id", &dispute_id)
.switch()?;
}
//TODO!: update the execute_query function to return reports instead of plain errors...
query_builder
.execute_query::<ConnectorEventsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct ConnectorEventsResult {
pub merchant_id: common_utils::id_type::MerchantId,
pub payment_id: String,
pub connector_name: Option<String>,
pub request_id: Option<String>,
pub flow: String,
pub request: String,
#[serde(rename = "masked_response")]
pub response: Option<String>,
pub error: Option<String>,
pub status_code: u16,
pub latency: Option<u128>,
#[serde(with = "common_utils::custom_serde::iso8601")]
pub created_at: PrimitiveDateTime,
pub method: Option<String>,
}
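// A minimal, standalone sketch of the #[serde(rename = "masked_response")]
// mapping in ConnectorEventsResult: the wire field name differs from the
// Rust field name, and serde bridges the two during deserialization.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ConnectorEventRowLite {
    #[serde(rename = "masked_response")]
    response: Option<String>,
}

fn main() {
    let row: ConnectorEventRowLite =
        serde_json::from_str(r#"{"masked_response":"{\"status\":\"ok\"}"}"#).expect("valid row");
    assert_eq!(row.response.as_deref(), Some("{\"status\":\"ok\"}"));
}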
// File: crates/analytics/src/api_event/core.rs
// Module: analytics::src::api_event::core
use std::collections::HashMap;
use api_models::analytics::{
api_event::{
ApiEventMetricsBucketIdentifier, ApiEventMetricsBucketValue, ApiLogsRequest,
ApiMetricsBucketResponse,
},
AnalyticsMetadata, ApiEventFiltersResponse, GetApiEventFiltersRequest,
GetApiEventMetricRequest, MetricsResponse,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use router_env::{
instrument, logger,
tracing::{self, Instrument},
};
use super::{
events::{get_api_event, ApiLogsResult},
metrics::ApiEventMetricRow,
};
use crate::{
errors::{AnalyticsError, AnalyticsResult},
metrics,
types::FiltersError,
AnalyticsProvider,
};
#[instrument(skip_all)]
pub async fn api_events_core(
pool: &AnalyticsProvider,
req: ApiLogsRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<Vec<ApiLogsResult>> {
let data = match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"API Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for API Events"),
AnalyticsProvider::Clickhouse(pool) => get_api_event(merchant_id, req, pool).await,
AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
| AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
get_api_event(merchant_id, req, ckh_pool).await
}
}
.switch()?;
Ok(data)
}
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetApiEventFiltersRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<ApiEventFiltersResponse> {
use api_models::analytics::{api_event::ApiEventDimensions, ApiEventFilterValue};
use super::filters::get_api_event_filter_for_dimension;
use crate::api_event::filters::ApiEventFilter;
let mut res = ApiEventFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(_pool) => Err(FiltersError::NotImplemented(
"API Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for API Events"),
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool) => {
get_api_event_filter_for_dimension(dim, merchant_id, &req.time_range, ckh_pool)
.await
}
}
.switch()?
.into_iter()
.filter_map(|fil: ApiEventFilter| match dim {
ApiEventDimensions::StatusCode => fil.status_code.map(|i| i.to_string()),
ApiEventDimensions::FlowType => fil.flow_type,
ApiEventDimensions::ApiFlow => fil.api_flow,
})
.collect::<Vec<String>>();
res.query_data.push(ApiEventFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
#[instrument(skip_all)]
pub async fn get_api_event_metrics(
pool: &AnalyticsProvider,
merchant_id: &common_utils::id_type::MerchantId,
req: GetApiEventMetricRequest,
) -> AnalyticsResult<MetricsResponse<ApiMetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<ApiEventMetricsBucketIdentifier, ApiEventMetricRow> =
HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_api_metrics_query",
api_event_metric = metric_type.as_ref()
);
// TODO: lifetime issues with joinset,
// can be optimized away if joinset lifetime requirements are relaxed
let merchant_id_scoped = merchant_id.to_owned();
set.spawn(
async move {
let data = pool
.get_api_event_metrics(
&metric_type,
&req.group_by_names.clone(),
&merchant_id_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
}
.instrument(task_span),
);
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
metrics_accumulator
.entry(id)
.and_modify(|data| {
data.api_count = data.api_count.or(value.api_count);
data.status_code_count = data.status_code_count.or(value.status_code_count);
data.latency = data.latency.or(value.latency);
})
.or_insert(value);
}
}
let query_data: Vec<ApiMetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| ApiMetricsBucketResponse {
values: ApiEventMetricsBucketValue {
latency: val.latency,
api_count: val.api_count,
status_code_count: val.status_code_count,
},
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
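// A minimal, standalone sketch of the JoinSet fan-out used in
// get_api_event_metrics above: one task per metric, each returning rows that
// are merged into an accumulator keyed by bucket. The metric names, row
// shapes, and merge rule here are simplified stand-ins.
use std::collections::HashMap;

#[tokio::main]
async fn main() {
    let mut set = tokio::task::JoinSet::new();
    for metric in ["api_count", "latency", "status_code_count"] {
        set.spawn(async move {
            // Stand-in for a per-metric analytics query.
            (metric, vec![("bucket_a", 1u64), ("bucket_b", 2u64)])
        });
    }
    let mut accumulator: HashMap<&str, u64> = HashMap::new();
    while let Some(joined) = set.join_next().await {
        let (_metric, rows) = joined.expect("task panicked");
        for (bucket, value) in rows {
            // Mirrors the .entry(id).and_modify(..).or_insert(..) merge above.
            *accumulator.entry(bucket).or_insert(0) += value;
        }
    }
    // Three metric tasks each contributed 1 to bucket_a.
    assert_eq!(accumulator["bucket_a"], 3);
}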
// File: crates/analytics/src/api_event/types.rs
// Module: analytics::src::api_event::types
use api_models::analytics::api_event::{ApiEventDimensions, ApiEventFilters};
use error_stack::ResultExt;
use crate::{
query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
types::{AnalyticsCollection, AnalyticsDataSource},
};
impl<T> QueryFilter<T> for ApiEventFilters
where
T: AnalyticsDataSource,
AnalyticsCollection: ToSql<T>,
{
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.status_code.is_empty() {
builder
.add_filter_in_range_clause(ApiEventDimensions::StatusCode, &self.status_code)
.attach_printable("Error adding status_code filter")?;
}
if !self.flow_type.is_empty() {
builder
.add_filter_in_range_clause(ApiEventDimensions::FlowType, &self.flow_type)
.attach_printable("Error adding flow_type filter")?;
}
if !self.api_flow.is_empty() {
builder
.add_filter_in_range_clause(ApiEventDimensions::ApiFlow, &self.api_flow)
.attach_printable("Error adding api_name filter")?;
}
Ok(())
}
}
// File: crates/analytics/src/api_event/events.rs
// Module: analytics::src::api_event::events
use api_models::analytics::{
api_event::{ApiLogsRequest, QueryType},
Granularity,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use router_env::Flow;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
};
pub trait ApiLogsFilterAnalytics: LoadRow<ApiLogsResult> {}
pub async fn get_api_event<T>(
merchant_id: &common_utils::id_type::MerchantId,
query_param: ApiLogsRequest,
pool: &T,
) -> FiltersResult<Vec<ApiLogsResult>>
where
T: AnalyticsDataSource + ApiLogsFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
match query_param.query_param {
QueryType::Payment { payment_id } => {
query_builder
.add_filter_clause("payment_id", &payment_id)
.switch()?;
query_builder
.add_filter_in_range_clause(
"api_flow",
&[
Flow::PaymentsCancel,
Flow::PaymentsCapture,
Flow::PaymentsConfirm,
Flow::PaymentsCreate,
Flow::PaymentsStart,
Flow::PaymentsUpdate,
Flow::RefundsCreate,
Flow::RefundsUpdate,
Flow::DisputesEvidenceSubmit,
Flow::AttachDisputeEvidence,
Flow::RetrieveDisputeEvidence,
Flow::IncomingWebhookReceive,
],
)
.switch()?;
}
QueryType::Refund {
payment_id,
refund_id,
} => {
query_builder
.add_filter_clause("payment_id", &payment_id)
.switch()?;
query_builder
.add_filter_clause("refund_id", refund_id)
.switch()?;
query_builder
.add_filter_in_range_clause("api_flow", &[Flow::RefundsCreate, Flow::RefundsUpdate])
.switch()?;
}
QueryType::Dispute {
payment_id,
dispute_id,
} => {
query_builder
.add_filter_clause("payment_id", &payment_id)
.switch()?;
query_builder
.add_filter_clause("dispute_id", dispute_id)
.switch()?;
query_builder
.add_filter_in_range_clause(
"api_flow",
&[
Flow::DisputesEvidenceSubmit,
Flow::AttachDisputeEvidence,
Flow::RetrieveDisputeEvidence,
],
)
.switch()?;
}
}
//TODO!: update the execute_query function to return reports instead of plain errors...
query_builder
.execute_query::<ApiLogsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct ApiLogsResult {
pub merchant_id: common_utils::id_type::MerchantId,
pub payment_id: Option<common_utils::id_type::PaymentId>,
pub refund_id: Option<String>,
pub payment_method_id: Option<String>,
pub payment_method: Option<String>,
pub payment_method_type: Option<String>,
pub customer_id: Option<String>,
pub user_id: Option<String>,
pub connector: Option<String>,
pub request_id: Option<String>,
pub flow_type: String,
pub api_flow: String,
pub api_auth_type: Option<String>,
pub request: String,
pub response: Option<String>,
pub error: Option<String>,
pub authentication_data: Option<String>,
pub status_code: u16,
pub latency: Option<u128>,
pub user_agent: Option<String>,
pub hs_latency: Option<u128>,
pub ip_addr: Option<String>,
#[serde(with = "common_utils::custom_serde::iso8601")]
pub created_at: PrimitiveDateTime,
pub http_method: Option<String>,
pub url_path: Option<String>,
}
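// A minimal, standalone sketch of the query-type dispatch above: each API-log
// query type narrows the api_flow filter to a fixed allow-list. Flow names
// are plain strings here (and the payment list is abbreviated) rather than
// router_env::Flow variants.
enum LogQueryType {
    Payment,
    Refund,
    Dispute,
}

fn allowed_flows(query: &LogQueryType) -> &'static [&'static str] {
    match query {
        LogQueryType::Payment => &[
            "PaymentsCreate",
            "PaymentsConfirm",
            "RefundsCreate",
            "IncomingWebhookReceive",
        ],
        LogQueryType::Refund => &["RefundsCreate", "RefundsUpdate"],
        LogQueryType::Dispute => &[
            "DisputesEvidenceSubmit",
            "AttachDisputeEvidence",
            "RetrieveDisputeEvidence",
        ],
    }
}

fn main() {
    assert!(allowed_flows(&LogQueryType::Refund).contains(&"RefundsUpdate"));
}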
// File: crates/analytics/src/api_event/metrics.rs
// Module: analytics::src::api_event::metrics
use api_models::analytics::{
api_event::{
ApiEventDimensions, ApiEventFilters, ApiEventMetrics, ApiEventMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult},
};
mod api_count;
pub mod latency;
mod status_code_count;
use std::collections::HashSet;
use api_count::ApiCount;
use latency::MaxLatency;
use status_code_count::StatusCodeCount;
use self::latency::LatencyAvg;
#[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)]
pub struct ApiEventMetricRow {
pub latency: Option<u64>,
pub api_count: Option<u64>,
pub status_code_count: Option<u64>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub start_bucket: Option<PrimitiveDateTime>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub end_bucket: Option<PrimitiveDateTime>,
}
pub trait ApiEventMetricAnalytics: LoadRow<ApiEventMetricRow> + LoadRow<LatencyAvg> {}
#[async_trait::async_trait]
pub trait ApiEventMetric<T>
where
T: AnalyticsDataSource + ApiEventMetricAnalytics,
{
async fn load_metrics(
&self,
dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>>;
}
#[async_trait::async_trait]
impl<T> ApiEventMetric<T> for ApiEventMetrics
where
T: AnalyticsDataSource + ApiEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
match self {
Self::Latency => {
MaxLatency
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::ApiCount => {
ApiCount
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::StatusCodeCount => {
StatusCodeCount
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
}
}
}
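// A minimal, standalone sketch of the delegation pattern above: a metrics
// enum fans out to zero-sized structs that each implement one shared trait,
// so adding a metric means adding a struct, an impl, and a match arm. The
// trait and types below are illustrative stand-ins.
trait Metric {
    fn name(&self) -> &'static str;
}

struct ApiCountMetric;
struct MaxLatencyMetric;

impl Metric for ApiCountMetric {
    fn name(&self) -> &'static str {
        "api_count"
    }
}

impl Metric for MaxLatencyMetric {
    fn name(&self) -> &'static str {
        "latency"
    }
}

enum MetricKind {
    ApiCount,
    Latency,
}

impl MetricKind {
    // The enum variant selects the concrete implementation, mirroring the
    // match self { Self::Latency => MaxLatency.load_metrics(..), .. } above.
    fn resolve(&self) -> Box<dyn Metric> {
        match self {
            Self::ApiCount => Box::new(ApiCountMetric),
            Self::Latency => Box::new(MaxLatencyMetric),
        }
    }
}

fn main() {
    assert_eq!(MetricKind::Latency.resolve().name(), "latency");
}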
// File: crates/analytics/src/api_event/filters.rs
// Module: analytics::src::api_event::filters
use api_models::analytics::{api_event::ApiEventDimensions, Granularity, TimeRange};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
};
pub trait ApiEventFilterAnalytics: LoadRow<ApiEventFilter> {}
pub async fn get_api_event_filter_for_dimension<T>(
dimension: ApiEventDimensions,
merchant_id: &common_utils::id_type::MerchantId,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<ApiEventFilter>>
where
T: AnalyticsDataSource + ApiEventFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<ApiEventFilter, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct ApiEventFilter {
pub status_code: Option<i32>,
pub flow_type: Option<String>,
pub api_flow: Option<String>,
}
// File: crates/analytics/src/api_event/metrics/latency.rs
// Module: analytics::src::api_event::metrics::latency
use std::collections::HashSet;
use api_models::analytics::{
api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::ApiEventMetricRow;
use crate::{
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct MaxLatency;
#[async_trait::async_trait]
impl<T> super::ApiEventMetric<T> for MaxLatency
where
T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
_dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder
.add_select_column(Aggregate::Sum {
field: "latency",
alias: Some("latency_sum"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: Some("latency"),
alias: Some("latency_count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_custom_filter_clause("request", "10.63.134.6", FilterTypes::NotLike)
.attach_printable("Error filtering out locker IP")
.switch()?;
query_builder
.execute_query::<LatencyAvg, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
ApiEventMetricsBucketIdentifier::new(TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
}),
ApiEventMetricRow {
latency: if i.latency_count != 0 {
Some(i.latency_sum.unwrap_or(0) / i.latency_count)
} else {
None
},
api_count: None,
status_code_count: None,
start_bucket: i.start_bucket,
end_bucket: i.end_bucket,
},
))
})
.collect::<error_stack::Result<
HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
#[derive(Debug, PartialEq, Eq, serde::Deserialize)]
pub struct LatencyAvg {
latency_sum: Option<u64>,
latency_count: u64,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub start_bucket: Option<PrimitiveDateTime>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub end_bucket: Option<PrimitiveDateTime>,
}
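// A minimal, standalone sketch of how ApiEventMetricRow::latency is derived
// from a LatencyAvg row above: an integer average of latency_sum over
// latency_count, with a zero-count bucket yielding None instead of dividing
// by zero.
fn average_latency(latency_sum: Option<u64>, latency_count: u64) -> Option<u64> {
    if latency_count != 0 {
        Some(latency_sum.unwrap_or(0) / latency_count)
    } else {
        None
    }
}

fn main() {
    assert_eq!(average_latency(Some(900), 3), Some(300));
    // An empty bucket produces no latency value rather than a panic.
    assert_eq!(average_latency(None, 0), None);
}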
// File: crates/analytics/src/api_event/metrics/status_code_count.rs
// Module: analytics::src::api_event::metrics::status_code_count
use std::collections::HashSet;
use api_models::analytics::{
api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::ApiEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct StatusCodeCount;
#[async_trait::async_trait]
impl<T> super::ApiEventMetric<T> for StatusCodeCount
where
T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
_dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder
.add_select_column(Aggregate::Count {
field: Some("status_code"),
alias: Some("status_code_count"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<ApiEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
ApiEventMetricsBucketIdentifier::new(TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
}),
i,
))
})
.collect::<error_stack::Result<
HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/api_event/metrics/api_count.rs
// Module: analytics::src::api_event::metrics::api_count
use std::collections::HashSet;
use api_models::analytics::{
api_event::{ApiEventDimensions, ApiEventFilters, ApiEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::ApiEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct ApiCount;
#[async_trait::async_trait]
impl<T> super::ApiEventMetric<T> for ApiCount
where
T: AnalyticsDataSource + super::ApiEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
_dimensions: &[ApiEventDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &ApiEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::ApiEvents);
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("api_count"),
})
.switch()?;
if !filters.flow_type.is_empty() {
query_builder
.add_filter_in_range_clause(ApiEventDimensions::FlowType, &filters.flow_type)
.attach_printable("Error adding flow_type filter")
.switch()?;
}
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.execute_query::<ApiEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
ApiEventMetricsBucketIdentifier::new(TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
}),
i,
))
})
.collect::<error_stack::Result<
HashSet<(ApiEventMetricsBucketIdentifier, ApiEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/core.rs
// Module: analytics::src::payment_intents::core
#![allow(dead_code)]
use std::collections::{HashMap, HashSet};
use api_models::analytics::{
payment_intents::{
MetricsBucketResponse, PaymentIntentDimensions, PaymentIntentMetrics,
PaymentIntentMetricsBucketIdentifier,
},
GetPaymentIntentFiltersRequest, GetPaymentIntentMetricRequest, PaymentIntentFilterValue,
PaymentIntentFiltersResponse, PaymentIntentsAnalyticsMetadata, PaymentIntentsMetricsResponse,
};
use bigdecimal::ToPrimitive;
use common_enums::Currency;
use common_utils::{errors::CustomResult, types::TimeRange};
use currency_conversion::{conversion::convert, types::ExchangeRates};
use error_stack::ResultExt;
use router_env::{
instrument, logger,
tracing::{self, Instrument},
};
use super::{
filters::{get_payment_intent_filter_for_dimension, PaymentIntentFilterRow},
metrics::PaymentIntentMetricRow,
sankey::{get_sankey_data, SankeyRow},
PaymentIntentMetricsAccumulator,
};
use crate::{
enums::AuthInfo,
errors::{AnalyticsError, AnalyticsResult},
metrics,
payment_intents::PaymentIntentMetricAccumulator,
AnalyticsProvider,
};
#[derive(Debug)]
pub enum TaskType {
MetricTask(
PaymentIntentMetrics,
CustomResult<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
AnalyticsError,
>,
),
}
#[instrument(skip_all)]
pub async fn get_sankey(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: TimeRange,
) -> AnalyticsResult<Vec<SankeyRow>> {
match pool {
AnalyticsProvider::Sqlx(_) => Err(AnalyticsError::NotImplemented(
"Sankey not implemented for sqlx",
))?,
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool) => {
let sankey_rows = get_sankey_data(ckh_pool, auth, &req)
.await
.change_context(AnalyticsError::UnknownError)?;
Ok(sankey_rows)
}
}
}
#[instrument(skip_all)]
pub async fn get_metrics(
pool: &AnalyticsProvider,
ex_rates: &Option<ExchangeRates>,
auth: &AuthInfo,
req: GetPaymentIntentMetricRequest,
) -> AnalyticsResult<PaymentIntentsMetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
PaymentIntentMetricsBucketIdentifier,
PaymentIntentMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_payment_intents_metrics_query",
payment_metric = metric_type.as_ref()
);
// TODO: lifetime issues with joinset,
// can be optimized away if joinset lifetime requirements are relaxed
let auth_scoped = auth.to_owned();
set.spawn(
async move {
let data = pool
.get_payment_intent_metrics(
&metric_type,
&req.group_by_names.clone(),
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
TaskType::MetricTask(metric_type, data)
}
.instrument(task_span),
);
}
while let Some(task_type) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
match task_type {
TaskType::MetricTask(metric, data) => {
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
PaymentIntentMetrics::SuccessfulSmartRetries
| PaymentIntentMetrics::SessionizedSuccessfulSmartRetries => {
metrics_builder
.successful_smart_retries
.add_metrics_bucket(&value)
}
PaymentIntentMetrics::TotalSmartRetries
| PaymentIntentMetrics::SessionizedTotalSmartRetries => metrics_builder
.total_smart_retries
.add_metrics_bucket(&value),
PaymentIntentMetrics::SmartRetriedAmount
| PaymentIntentMetrics::SessionizedSmartRetriedAmount => metrics_builder
.smart_retried_amount
.add_metrics_bucket(&value),
PaymentIntentMetrics::PaymentIntentCount
| PaymentIntentMetrics::SessionizedPaymentIntentCount => metrics_builder
.payment_intent_count
.add_metrics_bucket(&value),
PaymentIntentMetrics::PaymentsSuccessRate
| PaymentIntentMetrics::SessionizedPaymentsSuccessRate => metrics_builder
.payments_success_rate
.add_metrics_bucket(&value),
PaymentIntentMetrics::SessionizedPaymentProcessedAmount
| PaymentIntentMetrics::PaymentProcessedAmount => metrics_builder
.payment_processed_amount
.add_metrics_bucket(&value),
PaymentIntentMetrics::SessionizedPaymentsDistribution => metrics_builder
.payments_distribution
.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
}
}
let mut success = 0;
let mut success_without_smart_retries = 0;
let mut total_smart_retried_amount = 0;
let mut total_smart_retried_amount_in_usd = 0;
let mut total_smart_retried_amount_without_smart_retries = 0;
let mut total_smart_retried_amount_without_smart_retries_in_usd = 0;
let mut total = 0;
let mut total_payment_processed_amount = 0;
let mut total_payment_processed_amount_in_usd = 0;
let mut total_payment_processed_count = 0;
let mut total_payment_processed_amount_without_smart_retries = 0;
let mut total_payment_processed_amount_without_smart_retries_in_usd = 0;
let mut total_payment_processed_count_without_smart_retries = 0;
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| {
let mut collected_values = val.collect();
if let Some(success_count) = collected_values.successful_payments {
success += success_count;
}
if let Some(success_count) = collected_values.successful_payments_without_smart_retries
{
success_without_smart_retries += success_count;
}
if let Some(total_count) = collected_values.total_payments {
total += total_count;
}
if let Some(retried_amount) = collected_values.smart_retried_amount {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(retried_amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.smart_retried_amount_in_usd = amount_in_usd;
total_smart_retried_amount += retried_amount;
total_smart_retried_amount_in_usd += amount_in_usd.unwrap_or(0);
}
if let Some(retried_amount) =
collected_values.smart_retried_amount_without_smart_retries
{
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(retried_amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.smart_retried_amount_without_smart_retries_in_usd = amount_in_usd;
total_smart_retried_amount_without_smart_retries += retried_amount;
total_smart_retried_amount_without_smart_retries_in_usd +=
amount_in_usd.unwrap_or(0);
}
if let Some(amount) = collected_values.payment_processed_amount {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.payment_processed_amount_in_usd = amount_in_usd;
total_payment_processed_amount_in_usd += amount_in_usd.unwrap_or(0);
total_payment_processed_amount += amount;
}
if let Some(count) = collected_values.payment_processed_count {
total_payment_processed_count += count;
}
if let Some(amount) = collected_values.payment_processed_amount_without_smart_retries {
let amount_in_usd = if let Some(ex_rates) = ex_rates {
id.currency
.and_then(|currency| {
i64::try_from(amount)
.inspect_err(|e| logger::error!("Amount conversion error: {:?}", e))
.ok()
.and_then(|amount_i64| {
convert(ex_rates, currency, Currency::USD, amount_i64)
.inspect_err(|e| {
logger::error!("Currency conversion error: {:?}", e)
})
.ok()
})
})
.map(|amount| (amount * rust_decimal::Decimal::new(100, 0)).to_u64())
.unwrap_or_default()
} else {
None
};
collected_values.payment_processed_amount_without_smart_retries_in_usd =
amount_in_usd;
total_payment_processed_amount_without_smart_retries_in_usd +=
amount_in_usd.unwrap_or(0);
total_payment_processed_amount_without_smart_retries += amount;
}
if let Some(count) = collected_values.payment_processed_count_without_smart_retries {
total_payment_processed_count_without_smart_retries += count;
}
MetricsBucketResponse {
values: collected_values,
dimensions: id,
}
})
.collect();
let total_success_rate = match (success, total) {
(s, t) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
let total_success_rate_without_smart_retries = match (success_without_smart_retries, total) {
(s, t) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
Ok(PaymentIntentsMetricsResponse {
query_data,
meta_data: [PaymentIntentsAnalyticsMetadata {
total_success_rate,
total_success_rate_without_smart_retries,
total_smart_retried_amount: Some(total_smart_retried_amount),
total_smart_retried_amount_without_smart_retries: Some(
total_smart_retried_amount_without_smart_retries,
),
total_payment_processed_amount: Some(total_payment_processed_amount),
total_payment_processed_amount_without_smart_retries: Some(
total_payment_processed_amount_without_smart_retries,
),
total_smart_retried_amount_in_usd: if ex_rates.is_some() {
Some(total_smart_retried_amount_in_usd)
} else {
None
},
total_smart_retried_amount_without_smart_retries_in_usd: if ex_rates.is_some() {
Some(total_smart_retried_amount_without_smart_retries_in_usd)
} else {
None
},
total_payment_processed_amount_in_usd: if ex_rates.is_some() {
Some(total_payment_processed_amount_in_usd)
} else {
None
},
total_payment_processed_amount_without_smart_retries_in_usd: if ex_rates.is_some() {
Some(total_payment_processed_amount_without_smart_retries_in_usd)
} else {
None
},
total_payment_processed_count: Some(total_payment_processed_count),
total_payment_processed_count_without_smart_retries: Some(
total_payment_processed_count_without_smart_retries,
),
}],
})
}
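// A minimal, standalone sketch of the USD-conversion step in get_metrics
// above: a converted amount (a rust_decimal::Decimal) is scaled by 100 via
// Decimal::new(100, 0) and narrowed with to_u64(), with any failure
// collapsing to None. The fixed EUR->USD rate stands in for ExchangeRates
// and the convert() helper, which are not reproduced here.
use rust_decimal::{prelude::ToPrimitive, Decimal};

fn to_usd_minor_units(amount_major: Decimal, rate_to_usd: Decimal) -> Option<u64> {
    ((amount_major * rate_to_usd) * Decimal::new(100, 0)).to_u64()
}

fn main() {
    // 50.00 EUR at an assumed 1.10 rate -> 55.00 USD -> 5500 cents.
    let cents = to_usd_minor_units(Decimal::new(5000, 2), Decimal::new(110, 2));
    assert_eq!(cents, Some(5500));
}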
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetPaymentIntentFiltersRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<PaymentIntentFiltersResponse> {
let mut res = PaymentIntentFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_payment_intent_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_payment_intent_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
            AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
let ckh_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
                    sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payment intents analytics filters")
},
_ => {}
};
ckh_result
}
            AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_payment_intent_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
                    sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres payment intents analytics filters")
},
_ => {}
};
sqlx_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: PaymentIntentFilterRow| match dim {
PaymentIntentDimensions::PaymentIntentStatus => fil.status.map(|i| i.as_ref().to_string()),
PaymentIntentDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()),
PaymentIntentDimensions::ProfileId => fil.profile_id,
PaymentIntentDimensions::Connector => fil.connector,
PaymentIntentDimensions::AuthType => fil.authentication_type.map(|i| i.as_ref().to_string()),
PaymentIntentDimensions::PaymentMethod => fil.payment_method,
PaymentIntentDimensions::PaymentMethodType => fil.payment_method_type,
PaymentIntentDimensions::CardNetwork => fil.card_network,
PaymentIntentDimensions::MerchantId => fil.merchant_id,
PaymentIntentDimensions::CardLast4 => fil.card_last_4,
PaymentIntentDimensions::CardIssuer => fil.card_issuer,
PaymentIntentDimensions::ErrorReason => fil.error_reason,
})
.collect::<Vec<String>>();
res.query_data.push(PaymentIntentFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
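// A minimal, standalone sketch of the combined-provider reconciliation in
// get_filters above: both backends are queried, a mismatch is logged, and
// one side wins (ClickHouse for CombinedCkh, SQLx for CombinedSqlx). The
// generic function below is an illustrative stand-in.
fn reconcile<T: PartialEq + std::fmt::Debug>(
    ckh: Result<T, String>,
    sqlx: Result<T, String>,
) -> Result<T, String> {
    if let (Ok(a), Ok(b)) = (&ckh, &sqlx) {
        if a != b {
            // Stand-in for the router_env::logger::error! mismatch log.
            eprintln!("mismatch between backends: {a:?} vs {b:?}");
        }
    }
    // This variant prefers the ClickHouse result, as CombinedCkh does.
    ckh
}

fn main() {
    let merged = reconcile(Ok(vec!["card"]), Ok(vec!["card", "wallet"]));
    assert_eq!(merged, Ok(vec!["card"]));
}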
// File: crates/analytics/src/payment_intents/types.rs
// Module: analytics::src::payment_intents::types
use api_models::analytics::payment_intents::{PaymentIntentDimensions, PaymentIntentFilters};
use error_stack::ResultExt;
use crate::{
query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
types::{AnalyticsCollection, AnalyticsDataSource},
};
impl<T> QueryFilter<T> for PaymentIntentFilters
where
T: AnalyticsDataSource,
AnalyticsCollection: ToSql<T>,
{
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.status.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::PaymentIntentStatus,
&self.status,
)
.attach_printable("Error adding payment intent status filter")?;
}
if !self.currency.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::Currency, &self.currency)
.attach_printable("Error adding currency filter")?;
}
if !self.profile_id.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::ProfileId, &self.profile_id)
.attach_printable("Error adding profile id filter")?;
}
if !self.connector.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::Connector, &self.connector)
.attach_printable("Error adding connector filter")?;
}
if !self.auth_type.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::AuthType, &self.auth_type)
.attach_printable("Error adding auth type filter")?;
}
if !self.payment_method.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::PaymentMethod,
&self.payment_method,
)
.attach_printable("Error adding payment method filter")?;
}
if !self.payment_method_type.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::PaymentMethodType,
&self.payment_method_type,
)
.attach_printable("Error adding payment method type filter")?;
}
if !self.card_network.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::CardNetwork,
&self.card_network,
)
.attach_printable("Error adding card network filter")?;
}
if !self.merchant_id.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::MerchantId, &self.merchant_id)
.attach_printable("Error adding merchant id filter")?;
}
if !self.card_last_4.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::CardLast4, &self.card_last_4)
.attach_printable("Error adding card last 4 filter")?;
}
if !self.card_issuer.is_empty() {
builder
.add_filter_in_range_clause(PaymentIntentDimensions::CardIssuer, &self.card_issuer)
.attach_printable("Error adding card issuer filter")?;
}
if !self.error_reason.is_empty() {
builder
.add_filter_in_range_clause(
PaymentIntentDimensions::ErrorReason,
&self.error_reason,
)
.attach_printable("Error adding error reason filter")?;
}
if !self.customer_id.is_empty() {
builder
.add_filter_in_range_clause("customer_id", &self.customer_id)
.attach_printable("Error adding customer id filter")?;
}
Ok(())
}
}
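// A minimal, standalone sketch of the conditional IN-clause pattern above:
// each non-empty filter list contributes one "field IN (..)" predicate, and
// empty lists add nothing. The builder below is a toy that concatenates
// strings, unlike the crate's typed QueryBuilder.
struct ToyQueryBuilder {
    clauses: Vec<String>,
}

impl ToyQueryBuilder {
    fn add_filter_in_range_clause(&mut self, field: &str, values: &[&str]) {
        self.clauses
            .push(format!("{field} IN ('{}')", values.join("','")));
    }
}

fn main() {
    let mut builder = ToyQueryBuilder { clauses: vec![] };
    let currencies = ["USD", "EUR"];
    let connectors: [&str; 0] = [];
    if !currencies.is_empty() {
        builder.add_filter_in_range_clause("currency", &currencies);
    }
    if !connectors.is_empty() {
        builder.add_filter_in_range_clause("connector", &connectors);
    }
    assert_eq!(builder.clauses, vec!["currency IN ('USD','EUR')".to_string()]);
}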
// File: crates/analytics/src/payment_intents/metrics.rs
// Module: analytics::src::payment_intents::metrics
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetrics,
PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
};
mod payment_intent_count;
mod payment_processed_amount;
mod payments_success_rate;
mod sessionized_metrics;
mod smart_retried_amount;
mod successful_smart_retries;
mod total_smart_retries;
use payment_intent_count::PaymentIntentCount;
use payment_processed_amount::PaymentProcessedAmount;
use payments_success_rate::PaymentsSuccessRate;
use smart_retried_amount::SmartRetriedAmount;
use successful_smart_retries::SuccessfulSmartRetries;
use total_smart_retries::TotalSmartRetries;
#[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)]
pub struct PaymentIntentMetricRow {
pub status: Option<DBEnumWrapper<storage_enums::IntentStatus>>,
pub currency: Option<DBEnumWrapper<storage_enums::Currency>>,
pub profile_id: Option<String>,
pub connector: Option<String>,
pub authentication_type: Option<DBEnumWrapper<storage_enums::AuthenticationType>>,
pub payment_method: Option<String>,
pub payment_method_type: Option<String>,
pub card_network: Option<String>,
pub merchant_id: Option<String>,
pub card_last_4: Option<String>,
pub card_issuer: Option<String>,
pub error_reason: Option<String>,
pub first_attempt: Option<i64>,
pub total: Option<bigdecimal::BigDecimal>,
pub count: Option<i64>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub start_bucket: Option<PrimitiveDateTime>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub end_bucket: Option<PrimitiveDateTime>,
}
pub trait PaymentIntentMetricAnalytics: LoadRow<PaymentIntentMetricRow> {}
#[async_trait::async_trait]
pub trait PaymentIntentMetric<T>
where
T: AnalyticsDataSource + PaymentIntentMetricAnalytics,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>;
}
#[async_trait::async_trait]
impl<T> PaymentIntentMetric<T> for PaymentIntentMetrics
where
T: AnalyticsDataSource + PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
match self {
Self::SuccessfulSmartRetries => {
SuccessfulSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::TotalSmartRetries => {
TotalSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SmartRetriedAmount => {
SmartRetriedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentIntentCount => {
PaymentIntentCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentsSuccessRate => {
PaymentsSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::PaymentProcessedAmount => {
PaymentProcessedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedSuccessfulSmartRetries => {
sessionized_metrics::SuccessfulSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedTotalSmartRetries => {
sessionized_metrics::TotalSmartRetries
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedSmartRetriedAmount => {
sessionized_metrics::SmartRetriedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentIntentCount => {
sessionized_metrics::PaymentIntentCount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentsSuccessRate => {
sessionized_metrics::PaymentsSuccessRate
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentProcessedAmount => {
sessionized_metrics::PaymentProcessedAmount
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedPaymentsDistribution => {
sessionized_metrics::PaymentsDistribution
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
}
}
}
// File: crates/analytics/src/payment_intents/sankey.rs
// Module: analytics::src::payment_intents::sankey
use common_enums::enums;
use common_utils::{
errors::ParsingError,
types::{authentication::AuthInfo, TimeRange},
};
use error_stack::ResultExt;
use router_env::logger;
use crate::{
clickhouse::ClickhouseClient,
query::{Aggregate, QueryBuilder, QueryFilter},
types::{AnalyticsCollection, DBEnumWrapper, MetricsError, MetricsResult},
};
#[derive(
Clone,
Copy,
Debug,
Default,
Eq,
Hash,
PartialEq,
serde::Deserialize,
serde::Serialize,
strum::Display,
strum::EnumIter,
strum::EnumString,
)]
#[serde(rename_all = "snake_case")]
pub enum SessionizerRefundStatus {
FullRefunded,
#[default]
NotRefunded,
PartialRefunded,
}
#[derive(
Clone,
Copy,
Debug,
Default,
Eq,
Hash,
PartialEq,
serde::Deserialize,
serde::Serialize,
strum::Display,
strum::EnumIter,
strum::EnumString,
)]
#[serde(rename_all = "snake_case")]
pub enum SessionizerDisputeStatus {
DisputePresent,
#[default]
NotDisputed,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct SankeyRow {
pub count: i64,
pub status: DBEnumWrapper<enums::IntentStatus>,
#[serde(default)]
pub refunds_status: Option<DBEnumWrapper<SessionizerRefundStatus>>,
#[serde(default)]
pub dispute_status: Option<DBEnumWrapper<SessionizerDisputeStatus>>,
pub first_attempt: i64,
}
impl TryInto<SankeyRow> for serde_json::Value {
type Error = error_stack::Report<ParsingError>;
fn try_into(self) -> Result<SankeyRow, Self::Error> {
logger::debug!("Parsing SankeyRow from {:?}", self);
serde_json::from_value(self).change_context(ParsingError::StructParseFailure(
"Failed to parse Sankey in clickhouse results",
))
}
}
pub async fn get_sankey_data(
clickhouse_client: &ClickhouseClient,
auth: &AuthInfo,
time_range: &TimeRange,
) -> MetricsResult<Vec<SankeyRow>> {
let mut query_builder =
QueryBuilder::<ClickhouseClient>::new(AnalyticsCollection::PaymentIntentSessionized);
query_builder
.add_select_column(Aggregate::<String>::Count {
field: None,
alias: Some("count"),
})
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("status")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("refunds_status")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("dispute_status")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.attach_printable("Error adding select clause")
.change_context(MetricsError::QueryBuildingError)?;
auth.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
time_range
.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("status")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("refunds_status")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("dispute_status")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error adding group by clause")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.execute_query::<SankeyRow, _>(clickhouse_client)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(Ok)
.collect()
}
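// A minimal, standalone sketch of the "(attempt_count = 1) as first_attempt"
// projection above: ClickHouse returns the comparison as 0/1, which lands in
// an i64 field on the row type. The struct below trims SankeyRow to the two
// numeric fields for illustration.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct SankeyRowLite {
    count: i64,
    first_attempt: i64,
}

fn main() {
    let row: SankeyRowLite =
        serde_json::from_str(r#"{"count": 7, "first_attempt": 1}"#).expect("valid row");
    // 1 marks intents that settled on the first attempt; retried ones carry 0.
    assert_eq!(row.first_attempt, 1);
    assert_eq!(row.count, 7);
}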
// File: crates/analytics/src/payment_intents/filters.rs
// Module: analytics::src::payment_intents::filters
use api_models::analytics::{payment_intents::PaymentIntentDimensions, Granularity, TimeRange};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums::{AuthenticationType, Currency, IntentStatus};
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{
AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult,
LoadRow,
},
};
pub trait PaymentIntentFilterAnalytics: LoadRow<PaymentIntentFilterRow> {}
pub async fn get_payment_intent_filter_for_dimension<T>(
dimension: PaymentIntentDimensions,
merchant_id: &common_utils::id_type::MerchantId,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<PaymentIntentFilterRow>>
where
T: AnalyticsDataSource + PaymentIntentFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::PaymentIntent);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<PaymentIntentFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
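// Roughly the query the function above assembles for, say,
// PaymentIntentDimensions::Currency (illustrative only; the exact SQL and
// table name depend on the backend):
//   SELECT DISTINCT currency FROM <payment_intent_table>
//   WHERE merchant_id = <merchant_id> AND <time range filters>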
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct PaymentIntentFilterRow {
pub status: Option<DBEnumWrapper<IntentStatus>>,
pub currency: Option<DBEnumWrapper<Currency>>,
pub profile_id: Option<String>,
pub connector: Option<String>,
pub authentication_type: Option<DBEnumWrapper<AuthenticationType>>,
pub payment_method: Option<String>,
pub payment_method_type: Option<String>,
pub card_network: Option<String>,
pub merchant_id: Option<String>,
pub card_last_4: Option<String>,
pub card_issuer: Option<String>,
pub error_reason: Option<String>,
pub customer_id: Option<String>,
}
// File: crates/analytics/src/payment_intents/accumulator.rs
// Module: analytics::src::payment_intents::accumulator
use api_models::analytics::payment_intents::PaymentIntentMetricsBucketValue;
use bigdecimal::ToPrimitive;
use diesel_models::enums as storage_enums;
use super::metrics::PaymentIntentMetricRow;
#[derive(Debug, Default)]
pub struct PaymentIntentMetricsAccumulator {
pub successful_smart_retries: CountAccumulator,
pub total_smart_retries: CountAccumulator,
pub smart_retried_amount: SmartRetriedAmountAccumulator,
pub payment_intent_count: CountAccumulator,
pub payments_success_rate: PaymentsSuccessRateAccumulator,
pub payment_processed_amount: ProcessedAmountAccumulator,
pub payments_distribution: PaymentsDistributionAccumulator,
}
#[derive(Debug, Default)]
pub struct ErrorDistributionRow {
pub count: i64,
pub total: i64,
pub error_message: String,
}
#[derive(Debug, Default)]
pub struct ErrorDistributionAccumulator {
pub error_vec: Vec<ErrorDistributionRow>,
}
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct CountAccumulator {
pub count: Option<i64>,
}
pub trait PaymentIntentMetricAccumulator {
type MetricOutput;
fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow);
fn collect(self) -> Self::MetricOutput;
}
#[derive(Debug, Default)]
pub struct SmartRetriedAmountAccumulator {
pub amount: Option<i64>,
pub amount_without_retries: Option<i64>,
}
#[derive(Debug, Default)]
pub struct PaymentsSuccessRateAccumulator {
pub success: u32,
pub success_without_retries: u32,
pub total: u32,
}
#[derive(Debug, Default)]
pub struct ProcessedAmountAccumulator {
pub count_with_retries: Option<i64>,
pub total_with_retries: Option<i64>,
pub count_without_retries: Option<i64>,
pub total_without_retries: Option<i64>,
}
#[derive(Debug, Default)]
pub struct PaymentsDistributionAccumulator {
pub success_without_retries: u32,
pub failed_without_retries: u32,
pub total: u32,
}
impl PaymentIntentMetricAccumulator for CountAccumulator {
type MetricOutput = Option<u64>;
#[inline]
fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) {
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
self.count.and_then(|i| u64::try_from(i).ok())
}
}
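
// A minimal sketch of the intended call pattern for these accumulators: feed
// every fetched bucket through `add_metrics_bucket`, then `collect` exactly
// once. `rows` is a hypothetical batch of rows for a single bucket identifier.
#[allow(dead_code)]
fn count_accumulator_example(rows: &[PaymentIntentMetricRow]) -> Option<u64> {
    let mut acc = CountAccumulator::default();
    for row in rows {
        acc.add_metrics_bucket(row);
    }
    acc.collect()
}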
impl PaymentIntentMetricAccumulator for SmartRetriedAmountAccumulator {
type MetricOutput = (Option<u64>, Option<u64>, Option<u64>, Option<u64>);
#[inline]
fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) {
self.amount = match (
self.amount,
metrics.total.as_ref().and_then(ToPrimitive::to_i64),
) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
};
if metrics.first_attempt.unwrap_or(0) == 1 {
self.amount_without_retries = match (
self.amount_without_retries,
metrics.total.as_ref().and_then(ToPrimitive::to_i64),
) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
        } else {
            // Buckets that involved retries contribute nothing to the
            // without-retries sum; keep whatever has already been accumulated
            // (defaulting to zero) instead of resetting it.
            self.amount_without_retries = self.amount_without_retries.or(Some(0));
        }
}
#[inline]
fn collect(self) -> Self::MetricOutput {
let with_retries = self.amount.and_then(|i| u64::try_from(i).ok()).or(Some(0));
let without_retries = self
.amount_without_retries
.and_then(|i| u64::try_from(i).ok())
.or(Some(0));
        // The trailing pair corresponds to the `_in_usd` variants destructured
        // in PaymentIntentMetricsAccumulator::collect; they default to zero here.
        (with_retries, without_retries, Some(0), Some(0))
}
}
impl PaymentIntentMetricAccumulator for PaymentsSuccessRateAccumulator {
type MetricOutput = (
Option<u32>,
Option<u32>,
Option<u32>,
Option<f64>,
Option<f64>,
);
fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) {
if let Some(ref status) = metrics.status {
if status.as_ref() == &storage_enums::IntentStatus::Succeeded {
if let Some(success) = metrics
.count
.and_then(|success| u32::try_from(success).ok())
{
self.success += success;
if metrics.first_attempt.unwrap_or(0) == 1 {
self.success_without_retries += success;
}
}
}
if status.as_ref() != &storage_enums::IntentStatus::RequiresCustomerAction
&& status.as_ref() != &storage_enums::IntentStatus::RequiresPaymentMethod
&& status.as_ref() != &storage_enums::IntentStatus::RequiresMerchantAction
&& status.as_ref() != &storage_enums::IntentStatus::RequiresConfirmation
{
if let Some(total) = metrics.count.and_then(|total| u32::try_from(total).ok()) {
self.total += total;
}
}
}
}
fn collect(self) -> Self::MetricOutput {
if self.total == 0 {
(None, None, None, None, None)
} else {
let success = Some(self.success);
let success_without_retries = Some(self.success_without_retries);
let total = Some(self.total);
let success_rate = match (success, total) {
(Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
let success_without_retries_rate = match (success_without_retries, total) {
(Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
(
success,
success_without_retries,
total,
success_rate,
success_without_retries_rate,
)
}
}
}
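
// Worked example of the rate math above with hypothetical tallies: success =
// 90, success_without_retries = 75, total = 120 yields a success rate of
// 90 * 100 / 120 = 75.0 and a without-retries rate of 75 * 100 / 120 = 62.5.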
impl PaymentIntentMetricAccumulator for ProcessedAmountAccumulator {
type MetricOutput = (
Option<u64>,
Option<u64>,
Option<u64>,
Option<u64>,
Option<u64>,
Option<u64>,
);
#[inline]
fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) {
self.total_with_retries = match (
self.total_with_retries,
metrics.total.as_ref().and_then(ToPrimitive::to_i64),
) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
};
self.count_with_retries = match (self.count_with_retries, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
};
if metrics.first_attempt.unwrap_or(0) == 1 {
self.total_without_retries = match (
self.total_without_retries,
metrics.total.as_ref().and_then(ToPrimitive::to_i64),
) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
};
self.count_without_retries = match (self.count_without_retries, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
};
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
let total_with_retries = u64::try_from(self.total_with_retries.unwrap_or(0)).ok();
let count_with_retries = self.count_with_retries.and_then(|i| u64::try_from(i).ok());
let total_without_retries = u64::try_from(self.total_without_retries.unwrap_or(0)).ok();
let count_without_retries = self
.count_without_retries
.and_then(|i| u64::try_from(i).ok());
(
total_with_retries,
count_with_retries,
total_without_retries,
count_without_retries,
Some(0),
Some(0),
)
}
}
impl PaymentIntentMetricAccumulator for PaymentsDistributionAccumulator {
type MetricOutput = (Option<f64>, Option<f64>);
fn add_metrics_bucket(&mut self, metrics: &PaymentIntentMetricRow) {
let first_attempt = metrics.first_attempt.unwrap_or(0);
if let Some(ref status) = metrics.status {
if status.as_ref() == &storage_enums::IntentStatus::Succeeded {
if let Some(success) = metrics
.count
.and_then(|success| u32::try_from(success).ok())
{
if first_attempt == 1 {
self.success_without_retries += success;
}
}
}
            if let Some(failed) = metrics.count.and_then(|failed| u32::try_from(failed).ok()) {
                // A bucket with retries (first_attempt == 0) implies its first
                // attempt failed; first-attempt buckets count as failures only
                // when the intent itself ended in `Failed`.
                if first_attempt == 0
                    || (first_attempt == 1
                        && status.as_ref() == &storage_enums::IntentStatus::Failed)
                {
                    self.failed_without_retries += failed;
                }
            }
if status.as_ref() != &storage_enums::IntentStatus::RequiresCustomerAction
&& status.as_ref() != &storage_enums::IntentStatus::RequiresPaymentMethod
&& status.as_ref() != &storage_enums::IntentStatus::RequiresMerchantAction
&& status.as_ref() != &storage_enums::IntentStatus::RequiresConfirmation
{
if let Some(total) = metrics.count.and_then(|total| u32::try_from(total).ok()) {
self.total += total;
}
}
}
}
fn collect(self) -> Self::MetricOutput {
if self.total == 0 {
(None, None)
} else {
let success_without_retries = Some(self.success_without_retries);
let failed_without_retries = Some(self.failed_without_retries);
let total = Some(self.total);
let success_rate_without_retries = match (success_without_retries, total) {
(Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
let failed_rate_without_retries = match (failed_without_retries, total) {
(Some(s), Some(t)) if t > 0 => Some(f64::from(s) * 100.0 / f64::from(t)),
_ => None,
};
(success_rate_without_retries, failed_rate_without_retries)
}
}
}
impl PaymentIntentMetricsAccumulator {
pub fn collect(self) -> PaymentIntentMetricsBucketValue {
let (
successful_payments,
successful_payments_without_smart_retries,
total_payments,
payments_success_rate,
payments_success_rate_without_smart_retries,
) = self.payments_success_rate.collect();
let (
smart_retried_amount,
smart_retried_amount_without_smart_retries,
smart_retried_amount_in_usd,
smart_retried_amount_without_smart_retries_in_usd,
) = self.smart_retried_amount.collect();
let (
payment_processed_amount,
payment_processed_count,
payment_processed_amount_without_smart_retries,
payment_processed_count_without_smart_retries,
payment_processed_amount_in_usd,
payment_processed_amount_without_smart_retries_in_usd,
) = self.payment_processed_amount.collect();
let (
payments_success_rate_distribution_without_smart_retries,
payments_failure_rate_distribution_without_smart_retries,
) = self.payments_distribution.collect();
PaymentIntentMetricsBucketValue {
successful_smart_retries: self.successful_smart_retries.collect(),
total_smart_retries: self.total_smart_retries.collect(),
smart_retried_amount,
smart_retried_amount_in_usd,
smart_retried_amount_without_smart_retries,
smart_retried_amount_without_smart_retries_in_usd,
payment_intent_count: self.payment_intent_count.collect(),
successful_payments,
successful_payments_without_smart_retries,
total_payments,
payments_success_rate,
payments_success_rate_without_smart_retries,
payment_processed_amount,
payment_processed_count,
payment_processed_amount_without_smart_retries,
payment_processed_count_without_smart_retries,
payments_success_rate_distribution_without_smart_retries,
payments_failure_rate_distribution_without_smart_retries,
payment_processed_amount_in_usd,
payment_processed_amount_without_smart_retries_in_usd,
}
}
}
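
// A minimal sketch of the accumulator's full lifecycle; `rows` is a
// hypothetical batch of metric buckets that all share one bucket identifier.
#[allow(dead_code)]
fn bucket_value_example(rows: &[PaymentIntentMetricRow]) -> PaymentIntentMetricsBucketValue {
    let mut acc = PaymentIntentMetricsAccumulator::default();
    for row in rows {
        acc.payment_intent_count.add_metrics_bucket(row);
        acc.payments_success_rate.add_metrics_bucket(row);
        acc.payment_processed_amount.add_metrics_bucket(row);
        acc.payments_distribution.add_metrics_bucket(row);
    }
    acc.collect()
}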
// File: crates/analytics/src/payment_intents/metrics/payments_success_rate.rs
// Module: analytics::src::payment_intents::metrics::payments_success_rate
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentsSuccessRate;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentsSuccessRate
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/total_smart_retries.rs
// Module: analytics::src::payment_intents::metrics::total_smart_retries
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct TotalSmartRetries;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for TotalSmartRetries
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics
mod payment_intent_count;
mod payment_processed_amount;
mod payments_distribution;
mod payments_success_rate;
mod smart_retried_amount;
mod successful_smart_retries;
mod total_smart_retries;
pub(super) use payment_intent_count::PaymentIntentCount;
pub(super) use payment_processed_amount::PaymentProcessedAmount;
pub(super) use payments_distribution::PaymentsDistribution;
pub(super) use payments_success_rate::PaymentsSuccessRate;
pub(super) use smart_retried_amount::SmartRetriedAmount;
pub(super) use successful_smart_retries::SuccessfulSmartRetries;
pub(super) use total_smart_retries::TotalSmartRetries;
pub use super::{PaymentIntentMetric, PaymentIntentMetricAnalytics, PaymentIntentMetricRow};
// File: crates/analytics/src/payment_intents/metrics/payment_intent_count.rs
// Module: analytics::src::payment_intents::metrics::payment_intent_count
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentIntentCount;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentIntentCount
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/successful_smart_retries.rs
// Module: analytics::src::payment_intents::metrics::successful_smart_retries
use std::collections::HashSet;
use api_models::{
analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
},
enums::IntentStatus,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct SuccessfulSmartRetries;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for SuccessfulSmartRetries
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
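        // Together these two clauses keep only intents that needed more than
        // one attempt yet still ended in `succeeded`, i.e. smart retries that
        // paid off.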
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/smart_retried_amount.rs
// Module: analytics::src::payment_intents::metrics::smart_retried_amount
use std::collections::HashSet;
use api_models::{
analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
},
enums::IntentStatus,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct SmartRetriedAmount;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for SmartRetriedAmount
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/payment_processed_amount.rs
// Module: analytics::src::payment_intents::metrics::payment_processed_amount
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentProcessedAmount;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentProcessedAmount
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntent);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.add_filter_clause(
PaymentIntentDimensions::PaymentIntentStatus,
storage_enums::IntentStatus::Succeeded,
)
.switch()?;
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_success_rate.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics::payments_success_rate
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentsSuccessRate;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentsSuccessRate
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt".to_string())
.switch()?;
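        // `first_attempt` lands in PaymentIntentMetricRow::first_attempt and
        // lets the success-rate accumulator split retried from non-retried
        // volume.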
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/total_smart_retries.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics::total_smart_retries
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct TotalSmartRetries;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for TotalSmartRetries
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payments_distribution.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics::payments_distribution
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentsDistribution;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentsDistribution
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_intent_count.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics::payment_intent_count
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentIntentCount;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentIntentCount
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/successful_smart_retries.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics::successful_smart_retries
use std::collections::HashSet;
use api_models::{
analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
},
enums::IntentStatus,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct SuccessfulSmartRetries;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for SuccessfulSmartRetries
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/smart_retried_amount.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics::smart_retried_amount
use std::collections::HashSet;
use api_models::{
analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
},
enums::IntentStatus,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct SmartRetriedAmount;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for SmartRetriedAmount
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
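        // Rough sketch of the generated query, assuming the ClickHouse
        // backend (the table name below is illustrative; the builder derives
        // the real one from AnalyticsCollection::PaymentIntentSessionized):
        //   SELECT <dims>, sum(amount) AS total,
        //          (attempt_count = 1) AS first_attempt, currency,
        //          min(created_at) AS start_bucket, max(created_at) AS end_bucket
        //   FROM payment_intents_sessionized
        //   WHERE <auth/filter clauses> AND attempt_count > 1
        //     AND status = 'succeeded' AND <time range>
        //   GROUP BY <dims>, first_attempt, currency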
query_builder
.add_custom_filter_clause("attempt_count", "1", FilterTypes::Gt)
.switch()?;
query_builder
.add_custom_filter_clause("status", IntentStatus::Succeeded, FilterTypes::Equal)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
i.status.as_ref().map(|i| i.0),
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/payment_intents/metrics/sessionized_metrics/smart_retried_amount.rs
|
analytics::src::payment_intents::metrics::sessionized_metrics::smart_retried_amount
| 1,115
| true
|
// File: crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_processed_amount.rs
// Module: analytics::src::payment_intents::metrics::sessionized_metrics::payment_processed_amount
use std::collections::HashSet;
use api_models::analytics::{
payment_intents::{
PaymentIntentDimensions, PaymentIntentFilters, PaymentIntentMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums as storage_enums;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::PaymentIntentMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct PaymentProcessedAmount;
#[async_trait::async_trait]
impl<T> super::PaymentIntentMetric<T> for PaymentProcessedAmount
where
T: AnalyticsDataSource + super::PaymentIntentMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[PaymentIntentDimensions],
auth: &AuthInfo,
filters: &PaymentIntentFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>>
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::PaymentIntentSessionized);
let mut dimensions = dimensions.to_vec();
dimensions.push(PaymentIntentDimensions::PaymentIntentStatus);
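        // PaymentIntentStatus is force-added as a dimension so the filter
        // below can pin it to `succeeded`; the bucket identifier therefore
        // carries None for status.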
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column("(attempt_count = 1) as first_attempt")
.switch()?;
query_builder.add_select_column("currency").switch()?;
query_builder
.add_select_column(Aggregate::Sum {
field: "amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_group_by_clause("first_attempt")
.attach_printable("Error grouping by first_attempt")
.switch()?;
query_builder
.add_group_by_clause("currency")
.attach_printable("Error grouping by currency")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
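        // The status filter is added after the GROUP BY calls; the builder
        // presumably assembles clauses by kind, not by insertion order.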
query_builder
.add_filter_clause(
PaymentIntentDimensions::PaymentIntentStatus,
storage_enums::IntentStatus::Succeeded,
)
.switch()?;
query_builder
.execute_query::<PaymentIntentMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
PaymentIntentMetricsBucketIdentifier::new(
None,
i.currency.as_ref().map(|i| i.0),
i.profile_id.clone(),
i.connector.clone(),
i.authentication_type.as_ref().map(|i| i.0),
i.payment_method.clone(),
i.payment_method_type.clone(),
i.card_network.clone(),
i.merchant_id.clone(),
i.card_last_4.clone(),
i.card_issuer.clone(),
i.error_reason.clone(),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(PaymentIntentMetricsBucketIdentifier, PaymentIntentMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/payment_intents/metrics/sessionized_metrics/payment_processed_amount.rs
|
analytics::src::payment_intents::metrics::sessionized_metrics::payment_processed_amount
| 1,126
| true
|
// File: crates/analytics/src/active_payments/core.rs
// Module: analytics::src::active_payments::core
use std::collections::HashMap;
use api_models::analytics::{
active_payments::{
ActivePaymentsMetrics, ActivePaymentsMetricsBucketIdentifier, MetricsBucketResponse,
},
AnalyticsMetadata, GetActivePaymentsMetricRequest, MetricsResponse,
};
use error_stack::ResultExt;
use router_env::{instrument, logger, tracing};
use super::ActivePaymentsMetricsAccumulator;
use crate::{
active_payments::ActivePaymentsMetricAccumulator,
errors::{AnalyticsError, AnalyticsResult},
AnalyticsProvider,
};
#[instrument(skip_all)]
pub async fn get_metrics(
pool: &AnalyticsProvider,
publishable_key: &String,
merchant_id: &common_utils::id_type::MerchantId,
req: GetActivePaymentsMetricRequest,
) -> AnalyticsResult<MetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
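    // Fan out one task per requested metric; the results are joined back
    // below and merged into the per-bucket accumulator map.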
for metric_type in req.metrics.iter().cloned() {
let publishable_key_scoped = publishable_key.to_owned();
let merchant_id_scoped = merchant_id.to_owned();
let pool = pool.clone();
set.spawn(async move {
let data = pool
.get_active_payments_metrics(
&metric_type,
&merchant_id_scoped,
&publishable_key_scoped,
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
});
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
logger::info!("Logging metric: {metric} Result: {:?}", data);
for (id, value) in data? {
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
ActivePaymentsMetrics::ActivePayments => {
metrics_builder.active_payments.add_metrics_bucket(&value)
}
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| MetricsBucketResponse {
values: val.collect(),
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
|
crates/analytics/src/active_payments/core.rs
|
analytics::src::active_payments::core
| 580
| true
|
// File: crates/analytics/src/active_payments/metrics.rs
// Module: analytics::src::active_payments::metrics
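// Row type and dispatch trait for active-payments metrics; the enum impl
// below forwards each variant to its concrete metric implementation.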
use std::collections::HashSet;
use api_models::analytics::{
active_payments::{ActivePaymentsMetrics, ActivePaymentsMetricsBucketIdentifier},
Granularity, TimeRange,
};
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult},
};
mod active_payments;
use active_payments::ActivePayments;
#[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)]
pub struct ActivePaymentsMetricRow {
pub count: Option<i64>,
}
pub trait ActivePaymentsMetricAnalytics: LoadRow<ActivePaymentsMetricRow> {}
#[async_trait::async_trait]
pub trait ActivePaymentsMetric<T>
where
T: AnalyticsDataSource + ActivePaymentsMetricAnalytics,
{
async fn load_metrics(
&self,
merchant_id: &common_utils::id_type::MerchantId,
publishable_key: &str,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<
HashSet<(
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricRow,
)>,
>;
}
#[async_trait::async_trait]
impl<T> ActivePaymentsMetric<T> for ActivePaymentsMetrics
where
T: AnalyticsDataSource + ActivePaymentsMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
merchant_id: &common_utils::id_type::MerchantId,
publishable_key: &str,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<
HashSet<(
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricRow,
)>,
> {
match self {
Self::ActivePayments => {
ActivePayments
.load_metrics(merchant_id, publishable_key, time_range, pool)
.await
}
}
}
}
|
crates/analytics/src/active_payments/metrics.rs
|
analytics::src::active_payments::metrics
| 461
| true
|
// File: crates/analytics/src/active_payments/accumulator.rs
// Module: analytics::src::active_payments::accumulator
use api_models::analytics::active_payments::ActivePaymentsMetricsBucketValue;
use super::metrics::ActivePaymentsMetricRow;
#[derive(Debug, Default)]
pub struct ActivePaymentsMetricsAccumulator {
pub active_payments: CountAccumulator,
}
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct CountAccumulator {
pub count: Option<i64>,
}
pub trait ActivePaymentsMetricAccumulator {
type MetricOutput;
fn add_metrics_bucket(&mut self, metrics: &ActivePaymentsMetricRow);
fn collect(self) -> Self::MetricOutput;
}
impl ActivePaymentsMetricAccumulator for CountAccumulator {
type MetricOutput = Option<u64>;
#[inline]
fn add_metrics_bucket(&mut self, metrics: &ActivePaymentsMetricRow) {
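        // Treat a missing count as the identity: keep whichever side is Some
        // and add only when both buckets carry a value.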
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
self.count.and_then(|i| u64::try_from(i).ok())
}
}
impl ActivePaymentsMetricsAccumulator {
#[allow(dead_code)]
pub fn collect(self) -> ActivePaymentsMetricsBucketValue {
ActivePaymentsMetricsBucketValue {
active_payments: self.active_payments.collect(),
}
}
}
|
crates/analytics/src/active_payments/accumulator.rs
|
analytics::src::active_payments::accumulator
| 338
| true
|
// File: crates/analytics/src/active_payments/metrics/active_payments.rs
// Module: analytics::src::active_payments::metrics::active_payments
use std::collections::HashSet;
use api_models::analytics::{
active_payments::ActivePaymentsMetricsBucketIdentifier, Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::ActivePaymentsMetricRow;
use crate::{
query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct ActivePayments;
#[async_trait::async_trait]
impl<T> super::ActivePaymentsMetric<T> for ActivePayments
where
T: AnalyticsDataSource + super::ActivePaymentsMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
merchant_id: &common_utils::id_type::MerchantId,
publishable_key: &str,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<
HashSet<(
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricRow,
)>,
> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::ActivePaymentsAnalytics);
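        // "Active payments" are the distinct payment_ids seen in the events
        // table for this merchant within the time range. The table appears to
        // key rows by either the merchant id or the publishable key, hence
        // the two-value IN filter below.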
query_builder
.add_select_column(Aggregate::DistinctCount {
field: "payment_id",
alias: Some("count"),
})
.switch()?;
query_builder
.add_custom_filter_clause(
"merchant_id",
format!("'{}','{}'", merchant_id.get_string_repr(), publishable_key),
FilterTypes::In,
)
.switch()?;
query_builder
.add_negative_filter_clause("payment_id", "")
.switch()?;
query_builder
.add_custom_filter_clause(
"flow_type",
"'sdk', 'payment', 'payment_redirection_response'",
FilterTypes::In,
)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.execute_query::<ActivePaymentsMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| Ok((ActivePaymentsMetricsBucketIdentifier::new(None), i)))
.collect::<error_stack::Result<
HashSet<(
ActivePaymentsMetricsBucketIdentifier,
ActivePaymentsMetricRow,
)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/active_payments/metrics/active_payments.rs
|
analytics::src::active_payments::metrics::active_payments
| 631
| true
|
// File: crates/analytics/src/sdk_events/core.rs
// Module: analytics::src::sdk_events::core
use std::collections::HashMap;
use api_models::analytics::{
sdk_events::{
MetricsBucketResponse, SdkEventMetrics, SdkEventMetricsBucketIdentifier, SdkEventsRequest,
},
AnalyticsMetadata, GetSdkEventFiltersRequest, GetSdkEventMetricRequest, MetricsResponse,
SdkEventFiltersResponse,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use router_env::{instrument, logger, tracing};
use super::{
events::{get_sdk_event, SdkEventsResult},
SdkEventMetricsAccumulator,
};
use crate::{
errors::{AnalyticsError, AnalyticsResult},
sdk_events::SdkEventMetricAccumulator,
types::FiltersError,
AnalyticsProvider,
};
#[instrument(skip_all)]
pub async fn sdk_events_core(
pool: &AnalyticsProvider,
req: SdkEventsRequest,
publishable_key: &String,
) -> AnalyticsResult<Vec<SdkEventsResult>> {
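    // SDK events live only in ClickHouse: the SQLX-only provider is rejected,
    // and the combined providers always route to the ClickHouse pool.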
match pool {
AnalyticsProvider::Sqlx(_) => Err(FiltersError::NotImplemented(
"SDK Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for Sdk Events"),
AnalyticsProvider::Clickhouse(pool) => get_sdk_event(publishable_key, req, pool).await,
AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
| AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
get_sdk_event(publishable_key, req, ckh_pool).await
}
}
.switch()
}
#[instrument(skip_all)]
pub async fn get_metrics(
pool: &AnalyticsProvider,
publishable_key: &String,
req: GetSdkEventMetricRequest,
) -> AnalyticsResult<MetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
SdkEventMetricsBucketIdentifier,
SdkEventMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let publishable_key_scoped = publishable_key.to_owned();
let pool = pool.clone();
set.spawn(async move {
let data = pool
.get_sdk_event_metrics(
&metric_type,
&req.group_by_names.clone(),
&publishable_key_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
});
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
logger::info!("Logging Result {:?}", data);
for (id, value) in data? {
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
SdkEventMetrics::PaymentAttempts => {
metrics_builder.payment_attempts.add_metrics_bucket(&value)
}
SdkEventMetrics::PaymentMethodsCallCount => metrics_builder
.payment_methods_call_count
.add_metrics_bucket(&value),
SdkEventMetrics::SdkRenderedCount => metrics_builder
.sdk_rendered_count
.add_metrics_bucket(&value),
SdkEventMetrics::SdkInitiatedCount => metrics_builder
.sdk_initiated_count
.add_metrics_bucket(&value),
SdkEventMetrics::PaymentMethodSelectedCount => metrics_builder
.payment_method_selected_count
.add_metrics_bucket(&value),
SdkEventMetrics::PaymentDataFilledCount => metrics_builder
.payment_data_filled_count
.add_metrics_bucket(&value),
SdkEventMetrics::AveragePaymentTime => metrics_builder
.average_payment_time
.add_metrics_bucket(&value),
SdkEventMetrics::LoadTime => metrics_builder.load_time.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| MetricsBucketResponse {
values: val.collect(),
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
#[allow(dead_code)]
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetSdkEventFiltersRequest,
publishable_key: &String,
) -> AnalyticsResult<SdkEventFiltersResponse> {
use api_models::analytics::{sdk_events::SdkEventDimensions, SdkEventFilterValue};
use super::filters::get_sdk_event_filter_for_dimension;
use crate::sdk_events::filters::SdkEventFilter;
let mut res = SdkEventFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(_pool) => Err(FiltersError::NotImplemented(
"SDK Events not implemented for SQLX",
))
.attach_printable("SQL Analytics is not implemented for SDK Events"),
AnalyticsProvider::Clickhouse(pool) => {
get_sdk_event_filter_for_dimension(dim, publishable_key, &req.time_range, pool)
.await
}
AnalyticsProvider::CombinedSqlx(_sqlx_pool, ckh_pool)
| AnalyticsProvider::CombinedCkh(_sqlx_pool, ckh_pool) => {
get_sdk_event_filter_for_dimension(dim, publishable_key, &req.time_range, ckh_pool)
.await
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: SdkEventFilter| match dim {
SdkEventDimensions::PaymentMethod => fil.payment_method,
SdkEventDimensions::Platform => fil.platform,
SdkEventDimensions::BrowserName => fil.browser_name,
SdkEventDimensions::Source => fil.source,
SdkEventDimensions::Component => fil.component,
SdkEventDimensions::PaymentExperience => fil.payment_experience,
})
.collect::<Vec<String>>();
res.query_data.push(SdkEventFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
|
crates/analytics/src/sdk_events/core.rs
|
analytics::src::sdk_events::core
| 1,403
| true
|
// File: crates/analytics/src/sdk_events/types.rs
// Module: analytics::src::sdk_events::types
use api_models::analytics::sdk_events::{SdkEventDimensions, SdkEventFilters};
use error_stack::ResultExt;
use crate::{
query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
types::{AnalyticsCollection, AnalyticsDataSource},
};
impl<T> QueryFilter<T> for SdkEventFilters
where
T: AnalyticsDataSource,
AnalyticsCollection: ToSql<T>,
{
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
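        // Each non-empty filter list contributes one IN-range clause for its
        // dimension; empty lists are skipped entirely.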
if !self.payment_method.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::PaymentMethod, &self.payment_method)
.attach_printable("Error adding payment method filter")?;
}
if !self.platform.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::Platform, &self.platform)
.attach_printable("Error adding platform filter")?;
}
if !self.browser_name.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::BrowserName, &self.browser_name)
.attach_printable("Error adding browser name filter")?;
}
if !self.source.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::Source, &self.source)
.attach_printable("Error adding source filter")?;
}
if !self.component.is_empty() {
builder
.add_filter_in_range_clause(SdkEventDimensions::Component, &self.component)
.attach_printable("Error adding component filter")?;
}
if !self.payment_experience.is_empty() {
builder
.add_filter_in_range_clause(
SdkEventDimensions::PaymentExperience,
&self.payment_experience,
)
.attach_printable("Error adding payment experience filter")?;
}
Ok(())
}
}
|
crates/analytics/src/sdk_events/types.rs
|
analytics::src::sdk_events::types
| 414
| true
|
// File: crates/analytics/src/sdk_events/events.rs
// Module: analytics::src::sdk_events::events
use api_models::analytics::{
sdk_events::{SdkEventNames, SdkEventsRequest},
Granularity,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use strum::IntoEnumIterator;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
};
pub trait SdkEventsFilterAnalytics: LoadRow<SdkEventsResult> {}
pub async fn get_sdk_event<T>(
publishable_key: &String,
request: SdkEventsRequest,
pool: &T,
) -> FiltersResult<Vec<SdkEventsResult>>
where
T: AnalyticsDataSource + SdkEventsFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let static_event_list = SdkEventNames::iter()
.map(|i| format!("'{}'", i.as_ref()))
.collect::<Vec<String>>()
.join(",");
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::SdkEvents);
query_builder.add_select_column("*").switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_filter_clause("payment_id", &request.payment_id)
.switch()?;
query_builder
.add_custom_filter_clause("event_name", static_event_list, FilterTypes::In)
.switch()?;
let _ = &request
.time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
    // TODO: update the execute_query function to return reports instead of plain errors...
query_builder
.execute_query::<SdkEventsResult, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct SdkEventsResult {
pub merchant_id: common_utils::id_type::MerchantId,
pub payment_id: common_utils::id_type::PaymentId,
pub event_name: Option<String>,
pub log_type: Option<String>,
pub first_event: bool,
pub browser_name: Option<String>,
pub browser_version: Option<String>,
pub source: Option<String>,
pub category: Option<String>,
pub version: Option<String>,
pub value: Option<String>,
pub platform: Option<String>,
pub component: Option<String>,
pub payment_method: Option<String>,
pub payment_experience: Option<String>,
pub latency: Option<u64>,
#[serde(with = "common_utils::custom_serde::iso8601")]
pub created_at_precise: PrimitiveDateTime,
#[serde(with = "common_utils::custom_serde::iso8601")]
pub created_at: PrimitiveDateTime,
}
|
crates/analytics/src/sdk_events/events.rs
|
analytics::src::sdk_events::events
| 696
| true
|
// File: crates/analytics/src/sdk_events/metrics.rs
// Module: analytics::src::sdk_events::metrics
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetrics, SdkEventMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, LoadRow, MetricsResult},
};
mod average_payment_time;
mod load_time;
mod payment_attempts;
mod payment_data_filled_count;
mod payment_method_selected_count;
mod payment_methods_call_count;
mod sdk_initiated_count;
mod sdk_rendered_count;
use average_payment_time::AveragePaymentTime;
use load_time::LoadTime;
use payment_attempts::PaymentAttempts;
use payment_data_filled_count::PaymentDataFilledCount;
use payment_method_selected_count::PaymentMethodSelectedCount;
use payment_methods_call_count::PaymentMethodsCallCount;
use sdk_initiated_count::SdkInitiatedCount;
use sdk_rendered_count::SdkRenderedCount;
#[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)]
pub struct SdkEventMetricRow {
pub total: Option<bigdecimal::BigDecimal>,
pub count: Option<i64>,
pub time_bucket: Option<String>,
pub payment_method: Option<String>,
pub platform: Option<String>,
pub browser_name: Option<String>,
pub source: Option<String>,
pub component: Option<String>,
pub payment_experience: Option<String>,
}
pub trait SdkEventMetricAnalytics: LoadRow<SdkEventMetricRow> {}
#[async_trait::async_trait]
pub trait SdkEventMetric<T>
where
T: AnalyticsDataSource + SdkEventMetricAnalytics,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>>;
}
#[async_trait::async_trait]
impl<T> SdkEventMetric<T> for SdkEventMetrics
where
T: AnalyticsDataSource + SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
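        // Pure dispatch: every arm forwards the same arguments to the
        // corresponding unit-struct metric implementation.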
match self {
Self::PaymentAttempts => {
PaymentAttempts
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::PaymentMethodsCallCount => {
PaymentMethodsCallCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::SdkRenderedCount => {
SdkRenderedCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::SdkInitiatedCount => {
SdkInitiatedCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::PaymentMethodSelectedCount => {
PaymentMethodSelectedCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::PaymentDataFilledCount => {
PaymentDataFilledCount
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::AveragePaymentTime => {
AveragePaymentTime
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::LoadTime => {
LoadTime
.load_metrics(
dimensions,
publishable_key,
filters,
granularity,
time_range,
pool,
)
.await
}
}
}
}
|
crates/analytics/src/sdk_events/metrics.rs
|
analytics::src::sdk_events::metrics
| 1,010
| true
|
// File: crates/analytics/src/sdk_events/filters.rs
// Module: analytics::src::sdk_events::filters
use api_models::analytics::{sdk_events::SdkEventDimensions, Granularity, TimeRange};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, FiltersError, FiltersResult, LoadRow},
};
pub trait SdkEventFilterAnalytics: LoadRow<SdkEventFilter> {}
pub async fn get_sdk_event_filter_for_dimension<T>(
dimension: SdkEventDimensions,
publishable_key: &String,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<SdkEventFilter>>
where
T: AnalyticsDataSource + SdkEventFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
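    // Select the distinct values of one dimension within the time range,
    // used to populate the available filter values for that dimension.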
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<SdkEventFilter, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct SdkEventFilter {
pub payment_method: Option<String>,
pub platform: Option<String>,
pub browser_name: Option<String>,
pub source: Option<String>,
pub component: Option<String>,
pub payment_experience: Option<String>,
}
|
crates/analytics/src/sdk_events/filters.rs
|
analytics::src::sdk_events::filters
| 447
| true
|
// File: crates/analytics/src/sdk_events/accumulator.rs
// Module: analytics::src::sdk_events::accumulator
use api_models::analytics::sdk_events::SdkEventMetricsBucketValue;
use router_env::logger;
use super::metrics::SdkEventMetricRow;
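// Note: the latency metrics (average_payment_time, load_time) emit their
// value under the `count` alias, so they appear below as CountAccumulator
// fields; AverageAccumulator handles rows carrying both a total and a count.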
#[derive(Debug, Default)]
pub struct SdkEventMetricsAccumulator {
pub payment_attempts: CountAccumulator,
pub payment_methods_call_count: CountAccumulator,
pub average_payment_time: CountAccumulator,
pub load_time: CountAccumulator,
pub sdk_initiated_count: CountAccumulator,
pub sdk_rendered_count: CountAccumulator,
pub payment_method_selected_count: CountAccumulator,
pub payment_data_filled_count: CountAccumulator,
}
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct CountAccumulator {
pub count: Option<i64>,
}
#[derive(Debug, Default)]
pub struct AverageAccumulator {
pub total: u32,
pub count: u32,
}
pub trait SdkEventMetricAccumulator {
type MetricOutput;
fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow);
fn collect(self) -> Self::MetricOutput;
}
impl SdkEventMetricAccumulator for CountAccumulator {
type MetricOutput = Option<u64>;
#[inline]
fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow) {
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
self.count.and_then(|i| u64::try_from(i).ok())
}
}
impl SdkEventMetricAccumulator for AverageAccumulator {
type MetricOutput = Option<f64>;
fn add_metrics_bucket(&mut self, metrics: &SdkEventMetricRow) {
let total = metrics
.total
.as_ref()
.and_then(bigdecimal::ToPrimitive::to_u32);
        let count = metrics.count.and_then(|count| u32::try_from(count).ok());
match (total, count) {
(Some(total), Some(count)) => {
self.total += total;
self.count += count;
}
_ => {
logger::error!(message="Dropping metrics for average accumulator", metric=?metrics);
}
}
}
fn collect(self) -> Self::MetricOutput {
if self.count == 0 {
None
} else {
Some(f64::from(self.total) / f64::from(self.count))
}
}
}
impl SdkEventMetricsAccumulator {
#[allow(dead_code)]
pub fn collect(self) -> SdkEventMetricsBucketValue {
SdkEventMetricsBucketValue {
payment_attempts: self.payment_attempts.collect(),
payment_methods_call_count: self.payment_methods_call_count.collect(),
average_payment_time: self.average_payment_time.collect(),
load_time: self.load_time.collect(),
sdk_initiated_count: self.sdk_initiated_count.collect(),
sdk_rendered_count: self.sdk_rendered_count.collect(),
payment_method_selected_count: self.payment_method_selected_count.collect(),
payment_data_filled_count: self.payment_data_filled_count.collect(),
}
}
}
|
crates/analytics/src/sdk_events/accumulator.rs
|
analytics::src::sdk_events::accumulator
| 748
| true
|
// File: crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs
// Module: analytics::src::sdk_events::metrics::payment_method_selected_count
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentMethodSelectedCount;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for PaymentMethodSelectedCount
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
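        // Selection is tracked via the PaymentMethodChanged event below; only
        // rows flagged as a first occurrence (first_event above) are counted.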
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentMethodChanged)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/sdk_events/metrics/payment_method_selected_count.rs
|
analytics::src::sdk_events::metrics::payment_method_selected_count
| 816
| true
|
// File: crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs
// Module: analytics::src::sdk_events::metrics::payment_data_filled_count
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentDataFilledCount;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for PaymentDataFilledCount
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
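        // Counts first-occurrence PaymentDataFilled events, presumably the
        // sessions in which the user completed the payment form.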
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentDataFilled)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/sdk_events/metrics/payment_data_filled_count.rs
|
analytics::src::sdk_events::metrics::payment_data_filled_count
| 816
| true
|
// File: crates/analytics/src/sdk_events/metrics/load_time.rs
// Module: analytics::src::sdk_events::metrics::load_time
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct LoadTime;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for LoadTime
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Percentile {
field: "latency",
alias: Some("count"),
percentile: Some(&50),
})
.switch()?;
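        // Despite the `count` alias, this selects the median (p50) latency of
        // qualifying AppRendered events, filtered below to positive latencies.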
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::AppRendered)
.switch()?;
query_builder
.add_custom_filter_clause("latency", 0, FilterTypes::Gt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/sdk_events/metrics/load_time.rs
|
analytics::src::sdk_events::metrics::load_time
| 850
| true
|
// File: crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs
// Module: analytics::src::sdk_events::metrics::sdk_rendered_count
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct SdkRenderedCount;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for SdkRenderedCount
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::AppRendered)
.switch()?;
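        // Counts first AppRendered events, i.e. how many times the SDK UI
        // was actually rendered.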
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/sdk_events/metrics/sdk_rendered_count.rs
|
analytics::src::sdk_events::metrics::sdk_rendered_count
| 818
| true
|
// File: crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs
// Module: analytics::src::sdk_events::metrics::payment_methods_call_count
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentMethodsCallCount;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for PaymentMethodsCallCount
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentMethodsCall)
.switch()?;
query_builder
.add_filter_clause("log_type", "INFO")
.switch()?;
query_builder
.add_filter_clause("category", "API")
.switch()?;
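        // Restricting to INFO-level, API-category logs presumably keeps only
        // successful payment-methods list calls.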
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/sdk_events/metrics/payment_methods_call_count.rs
|
analytics::src::sdk_events::metrics::payment_methods_call_count
| 857
| true
|
// File: crates/analytics/src/sdk_events/metrics/average_payment_time.rs
// Module: analytics::src::sdk_events::metrics::average_payment_time
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct AveragePaymentTime;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for AveragePaymentTime
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Percentile {
field: "latency",
alias: Some("count"),
percentile: Some(&50),
})
.switch()?;
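        // Note: despite the metric's name, this computes the median (p50)
        // latency of PaymentAttempt events rather than an arithmetic mean.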
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentAttempt)
.switch()?;
query_builder
.add_custom_filter_clause("latency", 0, FilterTypes::Gt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/sdk_events/metrics/average_payment_time.rs
|
analytics::src::sdk_events::metrics::average_payment_time
| 854
| true
|
// File: crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs
// Module: analytics::src::sdk_events::metrics::sdk_initiated_count
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct SdkInitiatedCount;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for SdkInitiatedCount
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
let dimensions = dimensions.to_vec();
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::OrcaElementsCalled)
.switch()?;
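        // Initiation is tracked via the OrcaElementsCalled event (presumably
        // emitted when the SDK's elements are mounted).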
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(_granularity) = granularity.as_ref() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
|
crates/analytics/src/sdk_events/metrics/sdk_initiated_count.rs
|
analytics::src::sdk_events::metrics::sdk_initiated_count
| 819
| true
|
// File: crates/analytics/src/sdk_events/metrics/payment_attempts.rs
// Module: analytics::src::sdk_events::metrics::payment_attempts
use std::collections::HashSet;
use api_models::analytics::{
sdk_events::{
SdkEventDimensions, SdkEventFilters, SdkEventMetricsBucketIdentifier, SdkEventNames,
},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::SdkEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct PaymentAttempts;
#[async_trait::async_trait]
impl<T> super::SdkEventMetric<T> for PaymentAttempts
where
T: AnalyticsDataSource + super::SdkEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[SdkEventDimensions],
publishable_key: &str,
filters: &SdkEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::SdkEventsAnalytics);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
if let Some(granularity) = granularity {
query_builder
.add_granularity_in_mins(granularity)
.switch()?;
}
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", publishable_key)
.switch()?;
query_builder
.add_bool_filter_clause("first_event", 1)
.switch()?;
query_builder
.add_filter_clause("event_name", SdkEventNames::PaymentAttempt)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
        if granularity.is_some() {
query_builder
.add_group_by_clause("time_bucket")
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<SdkEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
SdkEventMetricsBucketIdentifier::new(
i.payment_method.clone(),
i.platform.clone(),
i.browser_name.clone(),
i.source.clone(),
i.component.clone(),
i.payment_experience.clone(),
i.time_bucket.clone(),
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(SdkEventMetricsBucketIdentifier, SdkEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/disputes/core.rs
// Module: analytics::src::disputes::core
use std::collections::HashMap;
use api_models::analytics::{
disputes::{
DisputeDimensions, DisputeMetrics, DisputeMetricsBucketIdentifier,
DisputeMetricsBucketResponse,
},
DisputeFilterValue, DisputeFiltersResponse, DisputesAnalyticsMetadata, DisputesMetricsResponse,
GetDisputeFilterRequest, GetDisputeMetricRequest,
};
use error_stack::ResultExt;
use router_env::{
logger,
tracing::{self, Instrument},
};
use super::{
filters::{get_dispute_filter_for_dimension, DisputeFilterRow},
DisputeMetricsAccumulator,
};
use crate::{
disputes::DisputeMetricAccumulator,
enums::AuthInfo,
errors::{AnalyticsError, AnalyticsResult},
metrics, AnalyticsProvider,
};
pub async fn get_metrics(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: GetDisputeMetricRequest,
) -> AnalyticsResult<DisputesMetricsResponse<DisputeMetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
DisputeMetricsBucketIdentifier,
DisputeMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span = tracing::debug_span!(
"analytics_dispute_query",
            dispute_metric = metric_type.as_ref()
);
        // Currently, JoinSet requires 'static references even when the spawned tasks
        // do not outlive the borrowed data.
        // We can optimize away this clone once that is fixed.
let auth_scoped = auth.to_owned();
set.spawn(
async move {
let data = pool
.get_dispute_metrics(
&metric_type,
&req.group_by_names.clone(),
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
}
.instrument(task_span),
);
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
DisputeMetrics::DisputeStatusMetric
| DisputeMetrics::SessionizedDisputeStatusMetric => metrics_builder
.disputes_status_rate
.add_metrics_bucket(&value),
DisputeMetrics::TotalAmountDisputed
| DisputeMetrics::SessionizedTotalAmountDisputed => {
metrics_builder.disputed_amount.add_metrics_bucket(&value)
}
DisputeMetrics::TotalDisputeLostAmount
| DisputeMetrics::SessionizedTotalDisputeLostAmount => metrics_builder
.dispute_lost_amount
.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
let mut total_disputed_amount = 0;
let mut total_dispute_lost_amount = 0;
let query_data: Vec<DisputeMetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| {
let collected_values = val.collect();
if let Some(amount) = collected_values.disputed_amount {
total_disputed_amount += amount;
}
if let Some(amount) = collected_values.dispute_lost_amount {
total_dispute_lost_amount += amount;
}
DisputeMetricsBucketResponse {
values: collected_values,
dimensions: id,
}
})
.collect();
Ok(DisputesMetricsResponse {
query_data,
meta_data: [DisputesAnalyticsMetadata {
total_disputed_amount: Some(total_disputed_amount),
total_dispute_lost_amount: Some(total_dispute_lost_amount),
}],
})
}
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetDisputeFilterRequest,
auth: &AuthInfo,
) -> AnalyticsResult<DisputeFiltersResponse> {
let mut res = DisputeFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_dispute_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_dispute_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
}
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
let ckh_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres disputes analytics filters")
},
_ => {}
};
ckh_result
}
AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_dispute_filter_for_dimension(
dim,
auth,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres disputes analytics filters")
},
_ => {}
};
sqlx_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: DisputeFilterRow| match dim {
DisputeDimensions::DisputeStage => fil.dispute_stage,
DisputeDimensions::Connector => fil.connector,
DisputeDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()),
})
.collect::<Vec<String>>();
res.query_data.push(DisputeFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
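// Standalone sketch (hypothetical stand-in types) of the fan-out/merge shape in
// get_metrics above: each metric runs as its own task, and every returned row is
// folded into one accumulator per bucket identifier via `entry(id).or_default()`.
#[cfg(test)]
mod metric_merge_sketch {
    use std::collections::HashMap;
    #[derive(Debug, Default, PartialEq)]
    struct Accumulator {
        count: u64,
    }
    fn merge(rows: Vec<(&'static str, u64)>) -> HashMap<&'static str, Accumulator> {
        let mut acc: HashMap<&'static str, Accumulator> = HashMap::new();
        for (bucket_id, value) in rows {
            acc.entry(bucket_id).or_default().count += value;
        }
        acc
    }
    #[test]
    fn rows_for_the_same_bucket_share_one_accumulator() {
        let merged = merge(vec![("usd|visa", 2), ("usd|visa", 3), ("eur|visa", 1)]);
        assert_eq!(merged["usd|visa"], Accumulator { count: 5 });
        assert_eq!(merged.len(), 2);
    }
}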
// File: crates/analytics/src/disputes/types.rs
// Module: analytics::src::disputes::types
use api_models::analytics::disputes::{DisputeDimensions, DisputeFilters};
use error_stack::ResultExt;
use crate::{
query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
types::{AnalyticsCollection, AnalyticsDataSource},
};
impl<T> QueryFilter<T> for DisputeFilters
where
T: AnalyticsDataSource,
AnalyticsCollection: ToSql<T>,
{
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.connector.is_empty() {
builder
.add_filter_in_range_clause(DisputeDimensions::Connector, &self.connector)
.attach_printable("Error adding connector filter")?;
}
if !self.dispute_stage.is_empty() {
builder
.add_filter_in_range_clause(DisputeDimensions::DisputeStage, &self.dispute_stage)
.attach_printable("Error adding dispute stage filter")?;
}
if !self.currency.is_empty() {
builder
.add_filter_in_range_clause(DisputeDimensions::Currency, &self.currency)
.attach_printable("Error adding currency filter")?;
}
Ok(())
}
}
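// Standalone sketch (hypothetical helper, not the real QueryBuilder API) of the
// pattern in set_filter_clause above: an IN (...) clause is emitted only for a
// non-empty filter list, so an absent filter places no constraint on the query.
// e.g. in_clause_sketch("connector", &["stripe", "adyen"])
//   => Some("connector IN ('stripe', 'adyen')")
fn in_clause_sketch(column: &str, values: &[&str]) -> Option<String> {
    if values.is_empty() {
        return None; // no values => column left unconstrained
    }
    let quoted: Vec<String> = values.iter().map(|v| format!("'{v}'")).collect();
    Some(format!("{column} IN ({})", quoted.join(", ")))
}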
// File: crates/analytics/src/disputes/metrics.rs
// Module: analytics::src::disputes::metrics
mod dispute_status_metric;
mod sessionized_metrics;
mod total_amount_disputed;
mod total_dispute_lost_amount;
use std::collections::HashSet;
use api_models::analytics::{
disputes::{DisputeDimensions, DisputeFilters, DisputeMetrics, DisputeMetricsBucketIdentifier},
Granularity,
};
use common_utils::types::TimeRange;
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;
use self::{
dispute_status_metric::DisputeStatusMetric, total_amount_disputed::TotalAmountDisputed,
total_dispute_lost_amount::TotalDisputeLostAmount,
};
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
};
#[derive(Debug, Eq, PartialEq, serde::Deserialize, Hash)]
pub struct DisputeMetricRow {
pub dispute_stage: Option<DBEnumWrapper<storage_enums::DisputeStage>>,
pub dispute_status: Option<DBEnumWrapper<storage_enums::DisputeStatus>>,
pub connector: Option<String>,
pub currency: Option<DBEnumWrapper<storage_enums::Currency>>,
pub total: Option<bigdecimal::BigDecimal>,
pub count: Option<i64>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub start_bucket: Option<PrimitiveDateTime>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub end_bucket: Option<PrimitiveDateTime>,
}
pub trait DisputeMetricAnalytics: LoadRow<DisputeMetricRow> {}
#[async_trait::async_trait]
pub trait DisputeMetric<T>
where
T: AnalyticsDataSource + DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>;
}
#[async_trait::async_trait]
impl<T> DisputeMetric<T> for DisputeMetrics
where
T: AnalyticsDataSource + DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>> {
match self {
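            // The Sessionized* variants mirror the base metrics but read from the
            // sessionized dispute collection (see the sessionized_metrics module).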
Self::TotalAmountDisputed => {
TotalAmountDisputed::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::DisputeStatusMetric => {
DisputeStatusMetric::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::TotalDisputeLostAmount => {
TotalDisputeLostAmount::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedTotalAmountDisputed => {
sessionized_metrics::TotalAmountDisputed::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedDisputeStatusMetric => {
sessionized_metrics::DisputeStatusMetric::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
Self::SessionizedTotalDisputeLostAmount => {
sessionized_metrics::TotalDisputeLostAmount::default()
.load_metrics(dimensions, auth, filters, granularity, time_range, pool)
.await
}
}
}
}
// File: crates/analytics/src/disputes/filters.rs
// Module: analytics::src::disputes::filters
use api_models::analytics::{disputes::DisputeDimensions, Granularity, TimeRange};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums::Currency;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{
AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult,
LoadRow,
},
};
pub trait DisputeFilterAnalytics: LoadRow<DisputeFilterRow> {}
pub async fn get_dispute_filter_for_dimension<T>(
dimension: DisputeDimensions,
auth: &AuthInfo,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<DisputeFilterRow>>
where
T: AnalyticsDataSource + DisputeFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<DisputeFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct DisputeFilterRow {
pub connector: Option<String>,
pub dispute_status: Option<String>,
pub connector_status: Option<String>,
pub dispute_stage: Option<String>,
pub currency: Option<DBEnumWrapper<Currency>>,
}
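// Standalone sketch of what the DISTINCT query above yields conceptually: one row
// per distinct value of the selected dimension, with the non-selected columns of
// DisputeFilterRow assumed to deserialize as None. The helper is illustrative only.
fn distinct_dimension_values(rows: Vec<Option<String>>) -> Vec<String> {
    let mut seen = std::collections::BTreeSet::new();
    rows.into_iter()
        .flatten() // drop NULL dimension values
        .filter(|value| seen.insert(value.clone()))
        .collect()
}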
// File: crates/analytics/src/disputes/accumulators.rs
// Module: analytics::src::disputes::accumulators
use api_models::analytics::disputes::DisputeMetricsBucketValue;
use diesel_models::enums as storage_enums;
use super::metrics::DisputeMetricRow;
#[derive(Debug, Default)]
pub struct DisputeMetricsAccumulator {
pub disputes_status_rate: RateAccumulator,
pub disputed_amount: DisputedAmountAccumulator,
pub dispute_lost_amount: DisputedAmountAccumulator,
}
#[derive(Debug, Default)]
pub struct RateAccumulator {
pub won_count: i64,
pub challenged_count: i64,
pub lost_count: i64,
pub total: i64,
}
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct DisputedAmountAccumulator {
pub total: Option<i64>,
}
pub trait DisputeMetricAccumulator {
type MetricOutput;
fn add_metrics_bucket(&mut self, metrics: &DisputeMetricRow);
fn collect(self) -> Self::MetricOutput;
}
impl DisputeMetricAccumulator for DisputedAmountAccumulator {
type MetricOutput = Option<u64>;
#[inline]
fn add_metrics_bucket(&mut self, metrics: &DisputeMetricRow) {
self.total = match (
self.total,
metrics
.total
.as_ref()
.and_then(bigdecimal::ToPrimitive::to_i64),
) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
self.total.and_then(|i| u64::try_from(i).ok())
}
}
impl DisputeMetricAccumulator for RateAccumulator {
type MetricOutput = Option<(Option<u64>, Option<u64>, Option<u64>, Option<u64>)>;
fn add_metrics_bucket(&mut self, metrics: &DisputeMetricRow) {
if let Some(ref dispute_status) = metrics.dispute_status {
if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeChallenged {
self.challenged_count += metrics.count.unwrap_or_default();
}
if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeWon {
self.won_count += metrics.count.unwrap_or_default();
}
if dispute_status.as_ref() == &storage_enums::DisputeStatus::DisputeLost {
self.lost_count += metrics.count.unwrap_or_default();
}
};
self.total += metrics.count.unwrap_or_default();
}
fn collect(self) -> Self::MetricOutput {
if self.total <= 0 {
Some((None, None, None, None))
} else {
Some((
u64::try_from(self.challenged_count).ok(),
u64::try_from(self.won_count).ok(),
u64::try_from(self.lost_count).ok(),
u64::try_from(self.total).ok(),
))
}
}
}
impl DisputeMetricsAccumulator {
pub fn collect(self) -> DisputeMetricsBucketValue {
let (challenge_rate, won_rate, lost_rate, total_dispute) =
self.disputes_status_rate.collect().unwrap_or_default();
DisputeMetricsBucketValue {
disputes_challenged: challenge_rate,
disputes_won: won_rate,
disputes_lost: lost_rate,
disputed_amount: self.disputed_amount.collect(),
dispute_lost_amount: self.dispute_lost_amount.collect(),
total_dispute,
}
}
}
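// Standalone restatement of the Option-merging rule in
// DisputedAmountAccumulator::add_metrics_bucket above: the total stays None until
// some bucket contributes a value, after which later values are summed in.
fn merge_totals_sketch(acc: Option<i64>, bucket: Option<i64>) -> Option<i64> {
    match (acc, bucket) {
        (None, None) => None,
        (None, v @ Some(_)) | (v @ Some(_), None) => v,
        (Some(a), Some(b)) => Some(a + b),
    }
}
#[cfg(test)]
mod merge_totals_sketch_test {
    #[test]
    fn totals_accumulate_across_buckets() {
        let total = [None, Some(1_200), Some(300), None]
            .into_iter()
            .fold(None, super::merge_totals_sketch);
        assert_eq!(total, Some(1_500));
    }
}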
// File: crates/analytics/src/disputes/metrics/total_dispute_lost_amount.rs
// Module: analytics::src::disputes::metrics::total_dispute_lost_amount
use std::collections::HashSet;
use api_models::analytics::{
disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::DisputeMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct TotalDisputeLostAmount {}
#[async_trait::async_trait]
impl<T> super::DisputeMetric<T> for TotalDisputeLostAmount
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_lost")
.switch()?;
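        // Taken together, the statement is roughly (hedged; exact SQL depends on
        // the backend's ToSql implementations):
        //   SELECT <dims>, SUM(dispute_amount) AS total,
        //          MIN(created_at) AS start_bucket, MAX(created_at) AS end_bucket
        //   FROM dispute
        //   WHERE dispute_status = 'dispute_lost' AND <filters> AND <time range>
        //   GROUP BY <dims>[, time bucket]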
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/disputes/metrics/sessionized_metrics.rs
// Module: analytics::src::disputes::metrics::sessionized_metrics
mod dispute_status_metric;
mod total_amount_disputed;
mod total_dispute_lost_amount;
pub(super) use dispute_status_metric::DisputeStatusMetric;
pub(super) use total_amount_disputed::TotalAmountDisputed;
pub(super) use total_dispute_lost_amount::TotalDisputeLostAmount;
pub use super::{DisputeMetric, DisputeMetricAnalytics, DisputeMetricRow};
// File: crates/analytics/src/disputes/metrics/dispute_status_metric.rs
// Module: analytics::src::disputes::metrics::dispute_status_metric
use std::collections::HashSet;
use api_models::analytics::{
disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::DisputeMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct DisputeStatusMetric {}
#[async_trait::async_trait]
impl<T> super::DisputeMetric<T> for DisputeStatusMetric
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder = QueryBuilder::new(AnalyticsCollection::Dispute);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
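        // Also select the raw dispute_status column; RateAccumulator downstream
        // splits counts into challenged/won/lost based on it.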
query_builder.add_select_column("dispute_status").switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions {
query_builder.add_group_by_clause(dim).switch()?;
}
query_builder
.add_group_by_clause("dispute_status")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/disputes/metrics/total_amount_disputed.rs
// Module: analytics::src::disputes::metrics::total_amount_disputed
use std::collections::HashSet;
use api_models::analytics::{
disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::DisputeMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct TotalAmountDisputed {}
#[async_trait::async_trait]
impl<T> super::DisputeMetric<T> for TotalAmountDisputed
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::Dispute);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_won")
.switch()?;
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/disputes/metrics/sessionized_metrics/total_dispute_lost_amount.rs
// Module: analytics::src::disputes::metrics::sessionized_metrics::total_dispute_lost_amount
use std::collections::HashSet;
use api_models::analytics::{
disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::DisputeMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct TotalDisputeLostAmount {}
#[async_trait::async_trait]
impl<T> super::DisputeMetric<T> for TotalDisputeLostAmount
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::DisputeSessionized);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_lost")
.switch()?;
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/disputes/metrics/sessionized_metrics/dispute_status_metric.rs
// Module: analytics::src::disputes::metrics::sessionized_metrics::dispute_status_metric
use std::collections::HashSet;
use api_models::analytics::{
disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::DisputeMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct DisputeStatusMetric {}
#[async_trait::async_trait]
impl<T> super::DisputeMetric<T> for DisputeStatusMetric
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder = QueryBuilder::new(AnalyticsCollection::DisputeSessionized);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
query_builder.add_select_column("dispute_status").switch()?;
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions {
query_builder.add_group_by_clause(dim).switch()?;
}
query_builder
.add_group_by_clause("dispute_status")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/disputes/metrics/sessionized_metrics/total_amount_disputed.rs
// Module: analytics::src::disputes::metrics::sessionized_metrics::total_amount_disputed
use std::collections::HashSet;
use api_models::analytics::{
disputes::{DisputeDimensions, DisputeFilters, DisputeMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::DisputeMetricRow;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(crate) struct TotalAmountDisputed {}
#[async_trait::async_trait]
impl<T> super::DisputeMetric<T> for TotalAmountDisputed
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[DisputeDimensions],
auth: &AuthInfo,
filters: &DisputeFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(DisputeMetricsBucketIdentifier, DisputeMetricRow)>>
where
T: AnalyticsDataSource + super::DisputeMetricAnalytics,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::DisputeSessionized);
for dim in dimensions {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Sum {
field: "dispute_amount",
alias: Some("total"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.add_filter_clause("dispute_status", "dispute_won")
.switch()?;
query_builder
.execute_query::<DisputeMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
DisputeMetricsBucketIdentifier::new(
i.dispute_stage.as_ref().map(|i| i.0),
i.connector.clone(),
i.currency.as_ref().map(|i| i.0),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<HashSet<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/frm/core.rs
// Module: analytics::src::frm::core
#![allow(dead_code)]
use std::collections::HashMap;
use api_models::analytics::{
frm::{FrmDimensions, FrmMetrics, FrmMetricsBucketIdentifier, FrmMetricsBucketResponse},
AnalyticsMetadata, FrmFilterValue, FrmFiltersResponse, GetFrmFilterRequest,
GetFrmMetricRequest, MetricsResponse,
};
use error_stack::ResultExt;
use router_env::{
logger,
tracing::{self, Instrument},
};
use super::{
filters::{get_frm_filter_for_dimension, FrmFilterRow},
FrmMetricsAccumulator,
};
use crate::{
errors::{AnalyticsError, AnalyticsResult},
frm::FrmMetricAccumulator,
metrics, AnalyticsProvider,
};
pub async fn get_metrics(
pool: &AnalyticsProvider,
merchant_id: &common_utils::id_type::MerchantId,
req: GetFrmMetricRequest,
) -> AnalyticsResult<MetricsResponse<FrmMetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<FrmMetricsBucketIdentifier, FrmMetricsAccumulator> =
HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let pool = pool.clone();
let task_span =
tracing::debug_span!("analytics_frm_query", frm_metric = metric_type.as_ref());
        // Currently, JoinSet requires 'static references even when the spawned tasks
        // do not outlive the borrowed data.
        // We can optimize away this clone once that is fixed.
let merchant_id_scoped = merchant_id.to_owned();
set.spawn(
async move {
let data = pool
.get_frm_metrics(
&metric_type,
&req.group_by_names.clone(),
&merchant_id_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
}
.instrument(task_span),
);
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
let data = data?;
let attributes = router_env::metric_attributes!(
("metric_type", metric.to_string()),
("source", pool.to_string()),
);
let value = u64::try_from(data.len());
if let Ok(val) = value {
metrics::BUCKETS_FETCHED.record(val, attributes);
logger::debug!("Attributes: {:?}, Buckets fetched: {}", attributes, val);
}
for (id, value) in data {
logger::debug!(bucket_id=?id, bucket_value=?value, "Bucket row for metric {metric}");
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
FrmMetrics::FrmBlockedRate => {
metrics_builder.frm_blocked_rate.add_metrics_bucket(&value)
}
FrmMetrics::FrmTriggeredAttempts => metrics_builder
.frm_triggered_attempts
.add_metrics_bucket(&value),
}
}
logger::debug!(
"Analytics Accumulated Results: metric: {}, results: {:#?}",
metric,
metrics_accumulator
);
}
let query_data: Vec<FrmMetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| FrmMetricsBucketResponse {
values: val.collect(),
dimensions: id,
})
.collect();
Ok(MetricsResponse {
query_data,
meta_data: [AnalyticsMetadata {
current_time_range: req.time_range,
}],
})
}
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetFrmFilterRequest,
merchant_id: &common_utils::id_type::MerchantId,
) -> AnalyticsResult<FrmFiltersResponse> {
let mut res = FrmFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
AnalyticsProvider::Sqlx(pool) => {
get_frm_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::Clickhouse(pool) => {
get_frm_filter_for_dimension(dim, merchant_id, &req.time_range, pool)
.await
}
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) => {
let ckh_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics filters")
},
_ => {}
};
ckh_result
}
AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
ckh_pool,
)
.await;
let sqlx_result = get_frm_filter_for_dimension(
dim,
merchant_id,
&req.time_range,
sqlx_pool,
)
.await;
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres frm analytics filters")
},
_ => {}
};
sqlx_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: FrmFilterRow| match dim {
FrmDimensions::FrmStatus => fil.frm_status.map(|i| i.as_ref().to_string()),
FrmDimensions::FrmName => fil.frm_name,
FrmDimensions::FrmTransactionType => {
fil.frm_transaction_type.map(|i| i.as_ref().to_string())
}
})
.collect::<Vec<String>>();
res.query_data.push(FrmFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
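// Standalone sketch (hypothetical error type) of the dual-read pattern in the
// Combined* arms above: query both stores, log a mismatch when both succeed but
// disagree, and serve whichever store the mode treats as primary.
fn reconcile_sketch<T: PartialEq + std::fmt::Debug>(
    primary: Result<T, String>,
    secondary: Result<T, String>,
) -> Result<T, String> {
    if let (Ok(p), Ok(s)) = (&primary, &secondary) {
        if p != s {
            eprintln!("analytics source mismatch: primary={p:?} secondary={s:?}");
        }
    }
    // Mirrors CombinedCkh returning the ClickHouse result and CombinedSqlx the SQLx one.
    primary
}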
// File: crates/analytics/src/frm/types.rs
// Module: analytics::src::frm::types
use api_models::analytics::frm::{FrmDimensions, FrmFilters};
use error_stack::ResultExt;
use crate::{
query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
types::{AnalyticsCollection, AnalyticsDataSource},
};
impl<T> QueryFilter<T> for FrmFilters
where
T: AnalyticsDataSource,
AnalyticsCollection: ToSql<T>,
{
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.frm_status.is_empty() {
builder
.add_filter_in_range_clause(FrmDimensions::FrmStatus, &self.frm_status)
.attach_printable("Error adding frm status filter")?;
}
if !self.frm_name.is_empty() {
builder
.add_filter_in_range_clause(FrmDimensions::FrmName, &self.frm_name)
.attach_printable("Error adding frm name filter")?;
}
if !self.frm_transaction_type.is_empty() {
builder
.add_filter_in_range_clause(
FrmDimensions::FrmTransactionType,
&self.frm_transaction_type,
)
.attach_printable("Error adding frm transaction type filter")?;
}
Ok(())
}
}
// File: crates/analytics/src/frm/metrics.rs
// Module: analytics::src::frm::metrics
use api_models::analytics::{
frm::{FrmDimensions, FrmFilters, FrmMetrics, FrmMetricsBucketIdentifier, FrmTransactionType},
Granularity, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;
mod frm_blocked_rate;
mod frm_triggered_attempts;
use frm_blocked_rate::FrmBlockedRate;
use frm_triggered_attempts::FrmTriggeredAttempts;
use crate::{
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
};
#[derive(Debug, Eq, PartialEq, serde::Deserialize)]
pub struct FrmMetricRow {
pub frm_name: Option<String>,
pub frm_status: Option<DBEnumWrapper<storage_enums::FraudCheckStatus>>,
pub frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>>,
pub total: Option<bigdecimal::BigDecimal>,
pub count: Option<i64>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub start_bucket: Option<PrimitiveDateTime>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub end_bucket: Option<PrimitiveDateTime>,
}
pub trait FrmMetricAnalytics: LoadRow<FrmMetricRow> {}
#[async_trait::async_trait]
pub trait FrmMetric<T>
where
T: AnalyticsDataSource + FrmMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[FrmDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &FrmFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>>;
}
#[async_trait::async_trait]
impl<T> FrmMetric<T> for FrmMetrics
where
T: AnalyticsDataSource + FrmMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[FrmDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &FrmFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
match self {
Self::FrmTriggeredAttempts => {
FrmTriggeredAttempts::default()
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
Self::FrmBlockedRate => {
FrmBlockedRate::default()
.load_metrics(
dimensions,
merchant_id,
filters,
granularity,
time_range,
pool,
)
.await
}
}
}
}
// File: crates/analytics/src/frm/filters.rs
// Module: analytics::src::frm::filters
use api_models::analytics::{
frm::{FrmDimensions, FrmTransactionType},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums::FraudCheckStatus;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{
AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult,
LoadRow,
},
};
pub trait FrmFilterAnalytics: LoadRow<FrmFilterRow> {}
pub async fn get_frm_filter_for_dimension<T>(
dimension: FrmDimensions,
merchant_id: &common_utils::id_type::MerchantId,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<FrmFilterRow>>
where
T: AnalyticsDataSource + FrmFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
query_builder.set_distinct();
query_builder
.execute_query::<FrmFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct FrmFilterRow {
pub frm_status: Option<DBEnumWrapper<FraudCheckStatus>>,
pub frm_transaction_type: Option<DBEnumWrapper<FrmTransactionType>>,
pub frm_name: Option<String>,
}
// File: crates/analytics/src/frm/accumulator.rs
// Module: analytics::src::frm::accumulator
use api_models::analytics::frm::FrmMetricsBucketValue;
use common_enums::enums as storage_enums;
use super::metrics::FrmMetricRow;
#[derive(Debug, Default)]
pub struct FrmMetricsAccumulator {
pub frm_triggered_attempts: TriggeredAttemptsAccumulator,
pub frm_blocked_rate: BlockedRateAccumulator,
}
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct TriggeredAttemptsAccumulator {
pub count: Option<i64>,
}
#[derive(Debug, Default)]
pub struct BlockedRateAccumulator {
pub fraud: i64,
pub total: i64,
}
pub trait FrmMetricAccumulator {
type MetricOutput;
fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow);
fn collect(self) -> Self::MetricOutput;
}
impl FrmMetricAccumulator for TriggeredAttemptsAccumulator {
type MetricOutput = Option<u64>;
#[inline]
fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow) {
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
self.count.and_then(|i| u64::try_from(i).ok())
}
}
impl FrmMetricAccumulator for BlockedRateAccumulator {
type MetricOutput = Option<f64>;
fn add_metrics_bucket(&mut self, metrics: &FrmMetricRow) {
if let Some(ref frm_status) = metrics.frm_status {
if frm_status.as_ref() == &storage_enums::FraudCheckStatus::Fraud {
self.fraud += metrics.count.unwrap_or_default();
}
};
self.total += metrics.count.unwrap_or_default();
}
fn collect(self) -> Self::MetricOutput {
if self.total <= 0 {
None
} else {
Some(
f64::from(u32::try_from(self.fraud).ok()?) * 100.0
/ f64::from(u32::try_from(self.total).ok()?),
)
}
}
}
impl FrmMetricsAccumulator {
pub fn collect(self) -> FrmMetricsBucketValue {
FrmMetricsBucketValue {
frm_blocked_rate: self.frm_blocked_rate.collect(),
frm_triggered_attempts: self.frm_triggered_attempts.collect(),
}
}
}
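// Standalone restatement of BlockedRateAccumulator::collect above: the blocked
// rate is fraud_count * 100 / total_count, with counts required to fit in u32 and
// an empty window yielding None.
fn blocked_rate_sketch(fraud: i64, total: i64) -> Option<f64> {
    if total <= 0 {
        return None;
    }
    Some(f64::from(u32::try_from(fraud).ok()?) * 100.0 / f64::from(u32::try_from(total).ok()?))
}
#[cfg(test)]
mod blocked_rate_sketch_test {
    #[test]
    fn five_of_two_hundred_is_two_and_a_half_percent() {
        assert_eq!(super::blocked_rate_sketch(5, 200), Some(2.5));
    }
}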
// File: crates/analytics/src/frm/metrics/frm_triggered_attempts.rs
// Module: analytics::src::frm::metrics::frm_triggered_attempts
use api_models::analytics::{
frm::{FrmDimensions, FrmFilters, FrmMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::FrmMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct FrmTriggeredAttempts {}
#[async_trait::async_trait]
impl<T> super::FrmMetric<T> for FrmTriggeredAttempts
where
T: AnalyticsDataSource + super::FrmMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[FrmDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &FrmFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>> {
let mut query_builder: QueryBuilder<T> = QueryBuilder::new(AnalyticsCollection::FraudCheck);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<FrmMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
FrmMetricsBucketIdentifier::new(
i.frm_name.as_ref().map(|i| i.to_string()),
i.frm_status.as_ref().map(|i| i.0.to_string()),
i.frm_transaction_type.as_ref().map(|i| i.0.to_string()),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<Vec<_>, crate::query::PostProcessingError>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/frm/metrics/frm_blocked_rate.rs
// Module: analytics::src::frm::metrics::frm_blocked_rate
use api_models::analytics::{
frm::{FrmDimensions, FrmFilters, FrmMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::FrmMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
};
#[derive(Default)]
pub(super) struct FrmBlockedRate {}
#[async_trait::async_trait]
impl<T> super::FrmMetric<T> for FrmBlockedRate
where
T: AnalyticsDataSource + super::FrmMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
dimensions: &[FrmDimensions],
merchant_id: &common_utils::id_type::MerchantId,
filters: &FrmFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>>
where
T: AnalyticsDataSource + super::FrmMetricAnalytics,
{
let mut query_builder = QueryBuilder::new(AnalyticsCollection::FraudCheck);
let mut dimensions = dimensions.to_vec();
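        // FrmStatus is appended as an extra group-by dimension so the query returns
        // separate counts per status; BlockedRateAccumulator needs them for the rate.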
dimensions.push(FrmDimensions::FrmStatus);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
query_builder
.add_filter_clause("merchant_id", merchant_id)
.switch()?;
time_range.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder.add_group_by_clause(dim).switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.switch()?;
}
query_builder
.execute_query::<FrmMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
FrmMetricsBucketIdentifier::new(
i.frm_name.as_ref().map(|i| i.to_string()),
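                        // frm_status is deliberately erased from the bucket identifier
                        // so every status folds into one bucket; the accumulator reads
                        // the per-row status to derive the blocked rate.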
None,
i.frm_transaction_type.as_ref().map(|i| i.0.to_string()),
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
Vec<(FrmMetricsBucketIdentifier, FrmMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/core.rs
// Module: analytics::src::auth_events::core
use std::collections::HashMap;
use api_models::analytics::{
auth_events::{
AuthEventDimensions, AuthEventMetrics, AuthEventMetricsBucketIdentifier,
MetricsBucketResponse,
},
AuthEventFilterValue, AuthEventFiltersResponse, AuthEventMetricsResponse,
AuthEventsAnalyticsMetadata, GetAuthEventFilterRequest, GetAuthEventMetricRequest,
};
use common_utils::types::TimeRange;
use error_stack::{report, ResultExt};
use router_env::{instrument, tracing};
use super::{
filters::{get_auth_events_filter_for_dimension, AuthEventFilterRow},
sankey::{get_sankey_data, SankeyRow},
AuthEventMetricsAccumulator,
};
use crate::{
auth_events::AuthEventMetricAccumulator,
enums::AuthInfo,
errors::{AnalyticsError, AnalyticsResult},
AnalyticsProvider,
};
#[instrument(skip_all)]
pub async fn get_metrics(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: GetAuthEventMetricRequest,
) -> AnalyticsResult<AuthEventMetricsResponse<MetricsBucketResponse>> {
let mut metrics_accumulator: HashMap<
AuthEventMetricsBucketIdentifier,
AuthEventMetricsAccumulator,
> = HashMap::new();
let mut set = tokio::task::JoinSet::new();
for metric_type in req.metrics.iter().cloned() {
let req = req.clone();
let auth_scoped = auth.to_owned();
let pool = pool.clone();
set.spawn(async move {
let data = pool
.get_auth_event_metrics(
&metric_type,
                    &req.group_by_names,
&auth_scoped,
&req.filters,
req.time_series.map(|t| t.granularity),
&req.time_range,
)
.await
.change_context(AnalyticsError::UnknownError);
(metric_type, data)
});
}
while let Some((metric, data)) = set
.join_next()
.await
.transpose()
.change_context(AnalyticsError::UnknownError)?
{
for (id, value) in data? {
let metrics_builder = metrics_accumulator.entry(id).or_default();
match metric {
AuthEventMetrics::AuthenticationCount => metrics_builder
.authentication_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationAttemptCount => metrics_builder
.authentication_attempt_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationSuccessCount => metrics_builder
.authentication_success_count
.add_metrics_bucket(&value),
AuthEventMetrics::ChallengeFlowCount => metrics_builder
.challenge_flow_count
.add_metrics_bucket(&value),
AuthEventMetrics::ChallengeAttemptCount => metrics_builder
.challenge_attempt_count
.add_metrics_bucket(&value),
AuthEventMetrics::ChallengeSuccessCount => metrics_builder
.challenge_success_count
.add_metrics_bucket(&value),
AuthEventMetrics::FrictionlessFlowCount => metrics_builder
.frictionless_flow_count
.add_metrics_bucket(&value),
AuthEventMetrics::FrictionlessSuccessCount => metrics_builder
.frictionless_success_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationErrorMessage => metrics_builder
.authentication_error_message
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationFunnel => metrics_builder
.authentication_funnel
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationExemptionApprovedCount => metrics_builder
.authentication_exemption_approved_count
.add_metrics_bucket(&value),
AuthEventMetrics::AuthenticationExemptionRequestedCount => metrics_builder
.authentication_exemption_requested_count
.add_metrics_bucket(&value),
}
}
}
let mut total_error_message_count = 0;
let query_data: Vec<MetricsBucketResponse> = metrics_accumulator
.into_iter()
.map(|(id, val)| {
let collected_values = val.collect();
if let Some(count) = collected_values.error_message_count {
total_error_message_count += count;
}
MetricsBucketResponse {
values: collected_values,
dimensions: id,
}
})
.collect();
Ok(AuthEventMetricsResponse {
query_data,
meta_data: [AuthEventsAnalyticsMetadata {
total_error_message_count: Some(total_error_message_count),
}],
})
}
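// NOTE: a minimal, illustrative restatement of the fan-out/fan-in pattern used
// by `get_metrics` above: one task per metric is spawned on a
// `tokio::task::JoinSet`, and results are merged as they complete. This is a
// sketch, not crate code; it assumes tokio's `rt` and `macros` features (the
// function above already spawns on a `JoinSet`). The metric names are made up.
#[cfg(test)]
mod join_set_fan_out_example {
    #[tokio::test]
    async fn merges_results_as_tasks_complete() {
        let mut set = tokio::task::JoinSet::new();
        for (metric, value) in [("count_a", 40u64), ("count_b", 2u64)] {
            // Each spawned task would normally run one metric query.
            set.spawn(async move { (metric, value) });
        }
        let mut total = 0u64;
        // `join_next` yields tasks in completion order, like the loop above.
        while let Some(result) = set.join_next().await {
            let (_metric, value) = result.expect("task should not panic");
            total += value;
        }
        assert_eq!(total, 42);
    }
}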
pub async fn get_filters(
pool: &AnalyticsProvider,
req: GetAuthEventFilterRequest,
auth: &AuthInfo,
) -> AnalyticsResult<AuthEventFiltersResponse> {
let mut res = AuthEventFiltersResponse::default();
for dim in req.group_by_names {
let values = match pool {
            AnalyticsProvider::Sqlx(_pool) => Err(report!(AnalyticsError::UnknownError)),
AnalyticsProvider::Clickhouse(pool) => {
get_auth_events_filter_for_dimension(dim, auth, &req.time_range, pool)
.await
.map_err(|e| e.change_context(AnalyticsError::UnknownError))
}
AnalyticsProvider::CombinedCkh(sqlx_pool, ckh_pool) | AnalyticsProvider::CombinedSqlx(sqlx_pool, ckh_pool) => {
let ckh_result = get_auth_events_filter_for_dimension(
dim,
auth,
&req.time_range,
ckh_pool,
)
.await
.map_err(|e| e.change_context(AnalyticsError::UnknownError));
let sqlx_result = get_auth_events_filter_for_dimension(
dim,
auth,
&req.time_range,
sqlx_pool,
)
.await
.map_err(|e| e.change_context(AnalyticsError::UnknownError));
match (&sqlx_result, &ckh_result) {
(Ok(ref sqlx_res), Ok(ref ckh_res)) if sqlx_res != ckh_res => {
                        router_env::logger::error!(clickhouse_result=?ckh_res, postgres_result=?sqlx_res, "Mismatch between clickhouse & postgres auth events analytics filters")
},
_ => {}
};
ckh_result
}
}
.change_context(AnalyticsError::UnknownError)?
.into_iter()
.filter_map(|fil: AuthEventFilterRow| match dim {
AuthEventDimensions::AuthenticationStatus => fil.authentication_status.map(|i| i.as_ref().to_string()),
AuthEventDimensions::TransactionStatus => fil.trans_status.map(|i| i.as_ref().to_string()),
AuthEventDimensions::AuthenticationType => fil.authentication_type.map(|i| i.as_ref().to_string()),
AuthEventDimensions::ErrorMessage => fil.error_message,
AuthEventDimensions::AuthenticationConnector => fil.authentication_connector.map(|i| i.as_ref().to_string()),
AuthEventDimensions::MessageVersion => fil.message_version,
AuthEventDimensions::AcsReferenceNumber => fil.acs_reference_number,
AuthEventDimensions::Platform => fil.platform,
AuthEventDimensions::Mcc => fil.mcc,
AuthEventDimensions::Currency => fil.currency.map(|i| i.as_ref().to_string()),
AuthEventDimensions::MerchantCountry => fil.merchant_country,
AuthEventDimensions::BillingCountry => fil.billing_country,
AuthEventDimensions::ShippingCountry => fil.shipping_country,
AuthEventDimensions::IssuerCountry => fil.issuer_country,
AuthEventDimensions::EarliestSupportedVersion => fil.earliest_supported_version,
AuthEventDimensions::LatestSupportedVersion => fil.latest_supported_version,
AuthEventDimensions::WhitelistDecision => fil.whitelist_decision.map(|i| i.to_string()),
AuthEventDimensions::DeviceManufacturer => fil.device_manufacturer,
AuthEventDimensions::DeviceType => fil.device_type,
AuthEventDimensions::DeviceBrand => fil.device_brand,
AuthEventDimensions::DeviceOs => fil.device_os,
AuthEventDimensions::DeviceDisplay => fil.device_display,
AuthEventDimensions::BrowserName => fil.browser_name,
AuthEventDimensions::BrowserVersion => fil.browser_version,
AuthEventDimensions::IssuerId => fil.issuer_id,
AuthEventDimensions::SchemeName => fil.scheme_name,
AuthEventDimensions::ExemptionRequested => fil.exemption_requested.map(|i| i.to_string()),
AuthEventDimensions::ExemptionAccepted => fil.exemption_accepted.map(|i| i.to_string()),
})
.collect::<Vec<String>>();
res.query_data.push(AuthEventFilterValue {
dimension: dim,
values,
})
}
Ok(res)
}
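// NOTE: illustrative sketch (not crate code) of the compare-and-prefer pattern
// used for the Combined* providers above: query both backends, log on
// disagreement, and return the ClickHouse result. Names and error type are
// hypothetical.
#[allow(dead_code)]
fn prefer_clickhouse<T: PartialEq + core::fmt::Debug>(
    ckh: Result<T, String>,
    sqlx: Result<T, String>,
) -> Result<T, String> {
    if let (Ok(c), Ok(s)) = (&ckh, &sqlx) {
        if c != s {
            // The real code routes this through router_env::logger::error!.
            eprintln!("clickhouse/postgres mismatch: ckh={c:?} sqlx={s:?}");
        }
    }
    ckh
}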
#[instrument(skip_all)]
pub async fn get_sankey(
pool: &AnalyticsProvider,
auth: &AuthInfo,
req: TimeRange,
) -> AnalyticsResult<Vec<SankeyRow>> {
match pool {
AnalyticsProvider::Sqlx(_) => Err(AnalyticsError::NotImplemented(
"Sankey not implemented for sqlx",
))?,
AnalyticsProvider::Clickhouse(ckh_pool)
| AnalyticsProvider::CombinedCkh(_, ckh_pool)
| AnalyticsProvider::CombinedSqlx(_, ckh_pool) => {
let sankey_rows = get_sankey_data(ckh_pool, auth, &req)
.await
.change_context(AnalyticsError::UnknownError)?;
Ok(sankey_rows)
}
}
}
// File: crates/analytics/src/auth_events/types.rs
// Module: analytics::src::auth_events::types
use api_models::analytics::auth_events::{AuthEventDimensions, AuthEventFilters};
use error_stack::ResultExt;
use crate::{
query::{QueryBuilder, QueryFilter, QueryResult, ToSql},
types::{AnalyticsCollection, AnalyticsDataSource},
};
impl<T> QueryFilter<T> for AuthEventFilters
where
T: AnalyticsDataSource,
AnalyticsCollection: ToSql<T>,
{
fn set_filter_clause(&self, builder: &mut QueryBuilder<T>) -> QueryResult<()> {
if !self.authentication_status.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::AuthenticationStatus,
&self.authentication_status,
)
.attach_printable("Error adding authentication status filter")?;
}
if !self.trans_status.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::TransactionStatus,
&self.trans_status,
)
.attach_printable("Error adding transaction status filter")?;
}
if !self.error_message.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::ErrorMessage, &self.error_message)
.attach_printable("Error adding error message filter")?;
}
if !self.authentication_connector.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::AuthenticationConnector,
&self.authentication_connector,
)
.attach_printable("Error adding authentication connector filter")?;
}
if !self.message_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::MessageVersion,
&self.message_version,
)
.attach_printable("Error adding message version filter")?;
}
if !self.platform.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::Platform, &self.platform)
.attach_printable("Error adding platform filter")?;
}
if !self.acs_reference_number.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::AcsReferenceNumber,
&self.acs_reference_number,
)
.attach_printable("Error adding acs reference number filter")?;
}
if !self.mcc.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::Mcc, &self.mcc)
.attach_printable("Failed to add MCC filter")?;
}
if !self.currency.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::Currency, &self.currency)
.attach_printable("Failed to add currency filter")?;
}
if !self.merchant_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::MerchantCountry,
&self.merchant_country,
)
.attach_printable("Failed to add merchant country filter")?;
}
if !self.billing_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::BillingCountry,
&self.billing_country,
)
.attach_printable("Failed to add billing country filter")?;
}
if !self.shipping_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::ShippingCountry,
&self.shipping_country,
)
.attach_printable("Failed to add shipping country filter")?;
}
if !self.issuer_country.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::IssuerCountry,
&self.issuer_country,
)
.attach_printable("Failed to add issuer country filter")?;
}
if !self.earliest_supported_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::EarliestSupportedVersion,
&self.earliest_supported_version,
)
.attach_printable("Failed to add earliest supported version filter")?;
}
if !self.latest_supported_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::LatestSupportedVersion,
&self.latest_supported_version,
)
.attach_printable("Failed to add latest supported version filter")?;
}
if !self.whitelist_decision.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::WhitelistDecision,
&self.whitelist_decision,
)
.attach_printable("Failed to add whitelist decision filter")?;
}
if !self.device_manufacturer.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::DeviceManufacturer,
&self.device_manufacturer,
)
.attach_printable("Failed to add device manufacturer filter")?;
}
if !self.device_type.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::DeviceType, &self.device_type)
.attach_printable("Failed to add device type filter")?;
}
if !self.device_brand.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::DeviceBrand, &self.device_brand)
.attach_printable("Failed to add device brand filter")?;
}
if !self.device_os.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::DeviceOs, &self.device_os)
.attach_printable("Failed to add device OS filter")?;
}
if !self.device_display.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::DeviceDisplay,
&self.device_display,
)
.attach_printable("Failed to add device display filter")?;
}
if !self.browser_name.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::BrowserName, &self.browser_name)
.attach_printable("Failed to add browser name filter")?;
}
if !self.browser_version.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::BrowserVersion,
&self.browser_version,
)
.attach_printable("Failed to add browser version filter")?;
}
if !self.issuer_id.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::IssuerId, &self.issuer_id)
.attach_printable("Failed to add issuer ID filter")?;
}
if !self.scheme_name.is_empty() {
builder
.add_filter_in_range_clause(AuthEventDimensions::SchemeName, &self.scheme_name)
.attach_printable("Failed to add scheme name filter")?;
}
if !self.exemption_requested.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::ExemptionRequested,
&self.exemption_requested,
)
.attach_printable("Failed to add exemption requested filter")?;
}
if !self.exemption_accepted.is_empty() {
builder
.add_filter_in_range_clause(
AuthEventDimensions::ExemptionAccepted,
&self.exemption_accepted,
)
.attach_printable("Failed to add exemption accepted filter")?;
}
Ok(())
}
}
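// NOTE: hypothetical condensation of the repeated blocks above; not crate code.
// One (field, dimension, message) row per filter replaces each near-identical
// `if !field.is_empty() { add_filter_in_range_clause(...) }` block, assuming
// the same builder API used above.
#[allow(unused_macros)]
macro_rules! add_in_range_filters {
    ($builder:expr, $filters:expr, [$(($field:ident, $dim:expr, $msg:literal)),* $(,)?]) => {
        $(
            if !$filters.$field.is_empty() {
                $builder
                    .add_filter_in_range_clause($dim, &$filters.$field)
                    .attach_printable($msg)?;
            }
        )*
    };
}
// Example use (inside `set_filter_clause`):
// add_in_range_filters!(builder, self, [
//     (mcc, AuthEventDimensions::Mcc, "Failed to add MCC filter"),
//     (currency, AuthEventDimensions::Currency, "Failed to add currency filter"),
// ]);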
// File: crates/analytics/src/auth_events/metrics.rs
// Module: analytics::src::auth_events::metrics
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{
AuthEventDimensions, AuthEventFilters, AuthEventMetrics, AuthEventMetricsBucketIdentifier,
},
Granularity, TimeRange,
};
use diesel_models::enums as storage_enums;
use time::PrimitiveDateTime;
use crate::{
query::{Aggregate, GroupByClause, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, LoadRow, MetricsResult},
AuthInfo,
};
mod authentication_attempt_count;
mod authentication_count;
mod authentication_error_message;
mod authentication_exemption_approved_count;
mod authentication_exemption_requested_count;
mod authentication_funnel;
mod authentication_success_count;
mod challenge_attempt_count;
mod challenge_flow_count;
mod challenge_success_count;
mod frictionless_flow_count;
mod frictionless_success_count;
use authentication_attempt_count::AuthenticationAttemptCount;
use authentication_count::AuthenticationCount;
use authentication_error_message::AuthenticationErrorMessage;
use authentication_exemption_approved_count::AuthenticationExemptionApprovedCount;
use authentication_exemption_requested_count::AuthenticationExemptionRequestedCount;
use authentication_funnel::AuthenticationFunnel;
use authentication_success_count::AuthenticationSuccessCount;
use challenge_attempt_count::ChallengeAttemptCount;
use challenge_flow_count::ChallengeFlowCount;
use challenge_success_count::ChallengeSuccessCount;
use frictionless_flow_count::FrictionlessFlowCount;
use frictionless_success_count::FrictionlessSuccessCount;
#[derive(Debug, PartialEq, Eq, serde::Deserialize, Hash)]
pub struct AuthEventMetricRow {
pub count: Option<i64>,
pub authentication_status: Option<DBEnumWrapper<storage_enums::AuthenticationStatus>>,
pub trans_status: Option<DBEnumWrapper<storage_enums::TransactionStatus>>,
pub authentication_type: Option<DBEnumWrapper<storage_enums::DecoupledAuthenticationType>>,
pub error_message: Option<String>,
pub authentication_connector: Option<DBEnumWrapper<storage_enums::AuthenticationConnectors>>,
pub message_version: Option<String>,
pub acs_reference_number: Option<String>,
pub platform: Option<String>,
pub mcc: Option<String>,
pub currency: Option<DBEnumWrapper<storage_enums::Currency>>,
pub merchant_country: Option<String>,
pub billing_country: Option<String>,
pub shipping_country: Option<String>,
pub issuer_country: Option<String>,
pub earliest_supported_version: Option<String>,
pub latest_supported_version: Option<String>,
pub whitelist_decision: Option<bool>,
pub device_manufacturer: Option<String>,
pub device_type: Option<String>,
pub device_brand: Option<String>,
pub device_os: Option<String>,
pub device_display: Option<String>,
pub browser_name: Option<String>,
pub browser_version: Option<String>,
pub issuer_id: Option<String>,
pub scheme_name: Option<String>,
pub exemption_requested: Option<bool>,
pub exemption_accepted: Option<bool>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub start_bucket: Option<PrimitiveDateTime>,
#[serde(with = "common_utils::custom_serde::iso8601::option")]
pub end_bucket: Option<PrimitiveDateTime>,
}
pub trait AuthEventMetricAnalytics: LoadRow<AuthEventMetricRow> {}
#[async_trait::async_trait]
pub trait AuthEventMetric<T>
where
T: AnalyticsDataSource + AuthEventMetricAnalytics,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>>;
}
#[async_trait::async_trait]
impl<T> AuthEventMetric<T> for AuthEventMetrics
where
T: AnalyticsDataSource + AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
match self {
Self::AuthenticationCount => {
AuthenticationCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationAttemptCount => {
AuthenticationAttemptCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationSuccessCount => {
AuthenticationSuccessCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::ChallengeFlowCount => {
ChallengeFlowCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::ChallengeAttemptCount => {
ChallengeAttemptCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::ChallengeSuccessCount => {
ChallengeSuccessCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::FrictionlessFlowCount => {
FrictionlessFlowCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::FrictionlessSuccessCount => {
FrictionlessSuccessCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationErrorMessage => {
AuthenticationErrorMessage
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationFunnel => {
AuthenticationFunnel
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationExemptionApprovedCount => {
AuthenticationExemptionApprovedCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
Self::AuthenticationExemptionRequestedCount => {
AuthenticationExemptionRequestedCount
.load_metrics(auth, dimensions, filters, granularity, time_range, pool)
.await
}
}
}
}
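// NOTE: standalone sketch of the dispatch shape implemented above: the metric
// enum forwards each variant to a dedicated unit struct behind a shared trait.
// Illustrative names only, not crate items.
#[allow(dead_code)]
mod metric_dispatch_example {
    trait Load {
        fn load(&self) -> &'static str;
    }
    struct CountMetric;
    impl Load for CountMetric {
        fn load(&self) -> &'static str {
            "count"
        }
    }
    enum Metric {
        Count,
    }
    impl Metric {
        fn load(&self) -> &'static str {
            match self {
                Self::Count => CountMetric.load(),
            }
        }
    }
}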
// File: crates/analytics/src/auth_events/sankey.rs
// Module: analytics::src::auth_events::sankey
use common_enums::AuthenticationStatus;
use common_utils::{
errors::ParsingError,
types::{authentication::AuthInfo, TimeRange},
};
use error_stack::ResultExt;
use router_env::logger;
use crate::{
clickhouse::ClickhouseClient,
query::{Aggregate, QueryBuilder, QueryFilter},
types::{AnalyticsCollection, MetricsError, MetricsResult},
};
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct SankeyRow {
pub count: i64,
pub authentication_status: Option<AuthenticationStatus>,
pub exemption_requested: Option<bool>,
pub exemption_accepted: Option<bool>,
}
impl TryInto<SankeyRow> for serde_json::Value {
type Error = error_stack::Report<ParsingError>;
fn try_into(self) -> Result<SankeyRow, Self::Error> {
logger::debug!("Parsing SankeyRow from {:?}", self);
serde_json::from_value(self).change_context(ParsingError::StructParseFailure(
"Failed to parse Sankey in clickhouse results",
))
}
}
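// NOTE: illustrative (hypothetical) test of the conversion above; ClickHouse
// rows arrive as `serde_json::Value`, and serde leaves absent `Option` fields
// as `None`. (`TryFrom<serde_json::Value> for SankeyRow` would be the more
// idiomatic direction, but the existing impl is kept as-is here.)
#[cfg(test)]
mod sankey_row_parse_example {
    use super::SankeyRow;
    #[test]
    fn parses_row_with_missing_optionals() {
        let value = serde_json::json!({ "count": 3, "exemption_requested": true });
        let row: SankeyRow = value.try_into().expect("row should parse");
        assert_eq!(row.count, 3);
        assert_eq!(row.exemption_requested, Some(true));
        assert_eq!(row.exemption_accepted, None);
    }
}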
pub async fn get_sankey_data(
clickhouse_client: &ClickhouseClient,
auth: &AuthInfo,
time_range: &TimeRange,
) -> MetricsResult<Vec<SankeyRow>> {
let mut query_builder =
QueryBuilder::<ClickhouseClient>::new(AnalyticsCollection::Authentications);
query_builder
.add_select_column(Aggregate::<String>::Count {
field: None,
alias: Some("count"),
})
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("exemption_requested")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("exemption_accepted")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_select_column("authentication_status")
.change_context(MetricsError::QueryBuildingError)?;
auth.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
time_range
.set_filter_clause(&mut query_builder)
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("exemption_requested")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("exemption_accepted")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.add_group_by_clause("authentication_status")
.change_context(MetricsError::QueryBuildingError)?;
query_builder
.execute_query::<SankeyRow, _>(clickhouse_client)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(Ok)
.collect()
}
// File: crates/analytics/src/auth_events/filters.rs
// Module: analytics::src::auth_events::filters
use api_models::analytics::{auth_events::AuthEventDimensions, Granularity, TimeRange};
use common_enums::{Currency, DecoupledAuthenticationType};
use common_utils::errors::ReportSwitchExt;
use diesel_models::enums::{AuthenticationConnectors, AuthenticationStatus, TransactionStatus};
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use crate::{
enums::AuthInfo,
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, ToSql, Window},
types::{
AnalyticsCollection, AnalyticsDataSource, DBEnumWrapper, FiltersError, FiltersResult,
LoadRow,
},
};
pub trait AuthEventFilterAnalytics: LoadRow<AuthEventFilterRow> {}
pub async fn get_auth_events_filter_for_dimension<T>(
dimension: AuthEventDimensions,
auth: &AuthInfo,
time_range: &TimeRange,
pool: &T,
) -> FiltersResult<Vec<AuthEventFilterRow>>
where
T: AnalyticsDataSource + AuthEventFilterAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
query_builder.add_select_column(dimension).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
query_builder.set_distinct();
auth.set_filter_clause(&mut query_builder).switch()?;
query_builder
.execute_query::<AuthEventFilterRow, _>(pool)
.await
.change_context(FiltersError::QueryBuildingError)?
.change_context(FiltersError::QueryExecutionFailure)
}
#[derive(Debug, serde::Serialize, Eq, PartialEq, serde::Deserialize)]
pub struct AuthEventFilterRow {
pub authentication_status: Option<DBEnumWrapper<AuthenticationStatus>>,
pub trans_status: Option<DBEnumWrapper<TransactionStatus>>,
pub authentication_type: Option<DBEnumWrapper<DecoupledAuthenticationType>>,
pub error_message: Option<String>,
pub authentication_connector: Option<DBEnumWrapper<AuthenticationConnectors>>,
pub message_version: Option<String>,
pub acs_reference_number: Option<String>,
pub platform: Option<String>,
pub mcc: Option<String>,
pub currency: Option<DBEnumWrapper<Currency>>,
pub merchant_country: Option<String>,
pub billing_country: Option<String>,
pub shipping_country: Option<String>,
pub issuer_country: Option<String>,
pub earliest_supported_version: Option<String>,
pub latest_supported_version: Option<String>,
pub whitelist_decision: Option<bool>,
pub device_manufacturer: Option<String>,
pub device_type: Option<String>,
pub device_brand: Option<String>,
pub device_os: Option<String>,
pub device_display: Option<String>,
pub browser_name: Option<String>,
pub browser_version: Option<String>,
pub issuer_id: Option<String>,
pub scheme_name: Option<String>,
pub exemption_requested: Option<bool>,
pub exemption_accepted: Option<bool>,
}
// File: crates/analytics/src/auth_events/accumulator.rs
// Module: analytics::src::auth_events::accumulator
use api_models::analytics::auth_events::AuthEventMetricsBucketValue;
use super::metrics::AuthEventMetricRow;
#[derive(Debug, Default)]
pub struct AuthEventMetricsAccumulator {
pub authentication_count: CountAccumulator,
pub authentication_attempt_count: CountAccumulator,
pub authentication_error_message: AuthenticationErrorMessageAccumulator,
pub authentication_success_count: CountAccumulator,
pub challenge_flow_count: CountAccumulator,
pub challenge_attempt_count: CountAccumulator,
pub challenge_success_count: CountAccumulator,
pub frictionless_flow_count: CountAccumulator,
pub frictionless_success_count: CountAccumulator,
pub authentication_funnel: CountAccumulator,
pub authentication_exemption_approved_count: CountAccumulator,
pub authentication_exemption_requested_count: CountAccumulator,
}
#[derive(Debug, Default)]
#[repr(transparent)]
pub struct CountAccumulator {
pub count: Option<i64>,
}
#[derive(Debug, Default)]
pub struct AuthenticationErrorMessageAccumulator {
pub count: Option<i64>,
}
pub trait AuthEventMetricAccumulator {
type MetricOutput;
fn add_metrics_bucket(&mut self, metrics: &AuthEventMetricRow);
fn collect(self) -> Self::MetricOutput;
}
impl AuthEventMetricAccumulator for CountAccumulator {
type MetricOutput = Option<u64>;
#[inline]
fn add_metrics_bucket(&mut self, metrics: &AuthEventMetricRow) {
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
self.count.and_then(|i| u64::try_from(i).ok())
}
}
impl AuthEventMetricAccumulator for AuthenticationErrorMessageAccumulator {
type MetricOutput = Option<u64>;
#[inline]
fn add_metrics_bucket(&mut self, metrics: &AuthEventMetricRow) {
self.count = match (self.count, metrics.count) {
(None, None) => None,
(None, i @ Some(_)) | (i @ Some(_), None) => i,
(Some(a), Some(b)) => Some(a + b),
}
}
#[inline]
fn collect(self) -> Self::MetricOutput {
self.count.and_then(|i| u64::try_from(i).ok())
}
}
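// NOTE: standalone restatement (illustrative only) of the merge used by both
// accumulators above: `None` means "no bucket seen yet", not zero, so a lone
// `Some` always wins. `i64::saturating_add` could replace `+` if bucket sums
// ever risk overflow.
#[allow(dead_code)]
fn merge_counts(acc: Option<i64>, incoming: Option<i64>) -> Option<i64> {
    match (acc, incoming) {
        (None, None) => None,
        (None, x @ Some(_)) | (x @ Some(_), None) => x,
        (Some(a), Some(b)) => Some(a + b),
    }
}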
impl AuthEventMetricsAccumulator {
pub fn collect(self) -> AuthEventMetricsBucketValue {
AuthEventMetricsBucketValue {
authentication_count: self.authentication_count.collect(),
authentication_attempt_count: self.authentication_attempt_count.collect(),
authentication_success_count: self.authentication_success_count.collect(),
challenge_flow_count: self.challenge_flow_count.collect(),
challenge_attempt_count: self.challenge_attempt_count.collect(),
challenge_success_count: self.challenge_success_count.collect(),
frictionless_flow_count: self.frictionless_flow_count.collect(),
frictionless_success_count: self.frictionless_success_count.collect(),
error_message_count: self.authentication_error_message.collect(),
authentication_funnel: self.authentication_funnel.collect(),
authentication_exemption_approved_count: self
.authentication_exemption_approved_count
.collect(),
authentication_exemption_requested_count: self
.authentication_exemption_requested_count
.collect(),
}
}
}
// File: crates/analytics/src/auth_events/metrics/authentication_exemption_approved_count.rs
// Module: analytics::src::auth_events::metrics::authentication_exemption_approved_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
#[derive(Default)]
pub(super) struct AuthenticationExemptionApprovedCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationExemptionApprovedCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
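        // "Approved" here means the requested exemption was accepted, i.e. rows
        // where exemption_accepted = true.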
query_builder
.add_filter_clause(AuthEventDimensions::ExemptionAccepted, true)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/frictionless_flow_count.rs
// Module: analytics::src::auth_events::metrics::frictionless_flow_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_enums::DecoupledAuthenticationType;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
#[derive(Default)]
pub(super) struct FrictionlessFlowCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for FrictionlessFlowCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
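        // A frictionless flow is identified by authentication_type = Frictionless.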
query_builder
.add_filter_clause(
"authentication_type",
DecoupledAuthenticationType::Frictionless,
)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/frictionless_success_count.rs
// Module: analytics::src::auth_events::metrics::frictionless_success_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_enums::{AuthenticationStatus, DecoupledAuthenticationType};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
#[derive(Default)]
pub(super) struct FrictionlessSuccessCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for FrictionlessSuccessCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
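        // Frictionless successes are frictionless-flow rows that also ended in
        // authentication_status = Success; both filters are applied below.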
query_builder
.add_filter_clause(
"authentication_type",
DecoupledAuthenticationType::Frictionless,
)
.switch()?;
query_builder
.add_filter_clause("authentication_status", AuthenticationStatus::Success)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/challenge_flow_count.rs
// Module: analytics::src::auth_events::metrics::challenge_flow_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_enums::DecoupledAuthenticationType;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
#[derive(Default)]
pub(super) struct ChallengeFlowCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for ChallengeFlowCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
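        // A challenge flow is identified by authentication_type = Challenge.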
query_builder
.add_filter_clause(
"authentication_type",
DecoupledAuthenticationType::Challenge,
)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/authentication_exemption_requested_count.rs
// Module: analytics::src::auth_events::metrics::authentication_exemption_requested_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
#[derive(Default)]
pub(super) struct AuthenticationExemptionRequestedCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationExemptionRequestedCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
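        // Counts authentications where an exemption was requested, i.e. rows
        // with exemption_requested = true.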
query_builder
.add_filter_clause(AuthEventDimensions::ExemptionRequested, true)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/authentication_count.rs
// Module: analytics::src::auth_events::metrics::authentication_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
#[derive(Default)]
pub(super) struct AuthenticationCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/challenge_attempt_count.rs
// Module: analytics::src::auth_events::metrics::challenge_attempt_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_enums::{AuthenticationStatus, DecoupledAuthenticationType};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
#[derive(Default)]
pub(super) struct ChallengeAttemptCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for ChallengeAttemptCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
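        // An "attempt" is a challenge-flow authentication that reached a
        // terminal status (Success or Failed); both filters are applied below.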
query_builder
.add_filter_clause(
"authentication_type",
DecoupledAuthenticationType::Challenge,
)
.switch()?;
query_builder
.add_filter_in_range_clause(
"authentication_status",
&[AuthenticationStatus::Success, AuthenticationStatus::Failed],
)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/authentication_attempt_count.rs
// Module: analytics::src::auth_events::metrics::authentication_attempt_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_enums::AuthenticationStatus;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
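/// Counts authentications that reached a terminal status (`Success` or
/// `Failed`), i.e. completed authentication attempts.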
#[derive(Default)]
pub(super) struct AuthenticationAttemptCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationAttemptCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
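        // An attempt is any authentication that reached a terminal status.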
query_builder
.add_filter_in_range_clause(
"authentication_status",
&[AuthenticationStatus::Success, AuthenticationStatus::Failed],
)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/authentication_success_count.rs
// Module: analytics::src::auth_events::metrics::authentication_success_count
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_enums::AuthenticationStatus;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{Aggregate, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql, Window},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
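/// Counts authentications that completed with `AuthenticationStatus::Success`.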
#[derive(Default)]
pub(super) struct AuthenticationSuccessCount;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationSuccessCount
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
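        // Count only authentications that completed successfully.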
query_builder
.add_filter_clause("authentication_status", AuthenticationStatus::Success)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/authentication_error_message.rs
// Module: analytics::src::auth_events::metrics::authentication_error_message
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_enums::AuthenticationStatus;
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{
Aggregate, FilterTypes, GroupByClause, Order, QueryBuilder, QueryFilter, SeriesBucket,
ToSql, Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
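/// Buckets failed authentications by error message (and any other requested
/// dimensions), ordered by descending count.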
#[derive(Default)]
pub(super) struct AuthenticationErrorMessage;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationErrorMessage
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
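        // Raw SQL select: summing `sign_flag` (rather than using `count(*)`)
        // lets sign-reversed correction rows cancel out of the count.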
query_builder
.add_select_column("sum(sign_flag) AS count")
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
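        // Only failed authentications that carry an error message.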
query_builder
.add_filter_clause("authentication_status", AuthenticationStatus::Failed)
.switch()?;
query_builder
.add_custom_filter_clause(
AuthEventDimensions::ErrorMessage,
"NULL",
FilterTypes::IsNotNull,
)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
query_builder
.add_order_by_clause("count", Order::Descending)
.attach_printable("Error adding order by clause")
.switch()?;
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}
// File: crates/analytics/src/auth_events/metrics/authentication_funnel.rs
// Module: analytics::src::auth_events::metrics::authentication_funnel
use std::collections::HashSet;
use api_models::analytics::{
auth_events::{AuthEventDimensions, AuthEventFilters, AuthEventMetricsBucketIdentifier},
Granularity, TimeRange,
};
use common_utils::errors::ReportSwitchExt;
use error_stack::ResultExt;
use time::PrimitiveDateTime;
use super::AuthEventMetricRow;
use crate::{
query::{
Aggregate, FilterTypes, GroupByClause, QueryBuilder, QueryFilter, SeriesBucket, ToSql,
Window,
},
types::{AnalyticsCollection, AnalyticsDataSource, MetricsError, MetricsResult},
AuthInfo,
};
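/// Counts authentications with a recorded transaction status, feeding the
/// authentication funnel view.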
#[derive(Default)]
pub(super) struct AuthenticationFunnel;
#[async_trait::async_trait]
impl<T> super::AuthEventMetric<T> for AuthenticationFunnel
where
T: AnalyticsDataSource + super::AuthEventMetricAnalytics,
PrimitiveDateTime: ToSql<T>,
AnalyticsCollection: ToSql<T>,
Granularity: GroupByClause<T>,
Aggregate<&'static str>: ToSql<T>,
Window<&'static str>: ToSql<T>,
{
async fn load_metrics(
&self,
auth: &AuthInfo,
dimensions: &[AuthEventDimensions],
filters: &AuthEventFilters,
granularity: Option<Granularity>,
time_range: &TimeRange,
pool: &T,
) -> MetricsResult<HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>> {
let mut query_builder: QueryBuilder<T> =
QueryBuilder::new(AnalyticsCollection::Authentications);
for dim in dimensions.iter() {
query_builder.add_select_column(dim).switch()?;
}
query_builder
.add_select_column(Aggregate::Count {
field: None,
alias: Some("count"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Min {
field: "created_at",
alias: Some("start_bucket"),
})
.switch()?;
query_builder
.add_select_column(Aggregate::Max {
field: "created_at",
alias: Some("end_bucket"),
})
.switch()?;
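        // Only rows where a 3DS transaction status was recorded.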
query_builder
.add_custom_filter_clause(
AuthEventDimensions::TransactionStatus,
"NULL",
FilterTypes::IsNotNull,
)
.switch()?;
filters.set_filter_clause(&mut query_builder).switch()?;
time_range
.set_filter_clause(&mut query_builder)
.attach_printable("Error filtering time range")
.switch()?;
auth.set_filter_clause(&mut query_builder).switch()?;
for dim in dimensions.iter() {
query_builder
.add_group_by_clause(dim)
.attach_printable("Error grouping by dimensions")
.switch()?;
}
if let Some(granularity) = granularity {
granularity
.set_group_by_clause(&mut query_builder)
.attach_printable("Error adding granularity")
.switch()?;
}
query_builder
.execute_query::<AuthEventMetricRow, _>(pool)
.await
.change_context(MetricsError::QueryBuildingError)?
.change_context(MetricsError::QueryExecutionFailure)?
.into_iter()
.map(|i| {
Ok((
AuthEventMetricsBucketIdentifier::new(
i.authentication_status.as_ref().map(|i| i.0),
i.trans_status.as_ref().map(|i| i.0.clone()),
i.authentication_type.as_ref().map(|i| i.0),
i.error_message.clone(),
i.authentication_connector.as_ref().map(|i| i.0),
i.message_version.clone(),
i.acs_reference_number.clone(),
i.mcc.clone(),
i.currency.as_ref().map(|i| i.0),
i.merchant_country.clone(),
i.billing_country.clone(),
i.shipping_country.clone(),
i.issuer_country.clone(),
i.earliest_supported_version.clone(),
i.latest_supported_version.clone(),
i.whitelist_decision,
i.device_manufacturer.clone(),
i.device_type.clone(),
i.device_brand.clone(),
i.device_os.clone(),
i.device_display.clone(),
i.browser_name.clone(),
i.browser_version.clone(),
i.issuer_id.clone(),
i.scheme_name.clone(),
i.exemption_requested,
i.exemption_accepted,
TimeRange {
start_time: match (granularity, i.start_bucket) {
(Some(g), Some(st)) => g.clip_to_start(st)?,
_ => time_range.start_time,
},
end_time: granularity.as_ref().map_or_else(
|| Ok(time_range.end_time),
|g| i.end_bucket.map(|et| g.clip_to_end(et)).transpose(),
)?,
},
),
i,
))
})
.collect::<error_stack::Result<
HashSet<(AuthEventMetricsBucketIdentifier, AuthEventMetricRow)>,
crate::query::PostProcessingError,
>>()
.change_context(MetricsError::PostProcessingFailure)
}
}