Implement passing `orders` and `header_icons` through the Pool trait

pull/143/head
kyoto7250 2 years ago
parent 7a32f3ac12
commit f4e7877c6e

@ -22,6 +22,8 @@ pub trait Pool: Send + Sync {
table: &Table,
page: u16,
filter: Option<String>,
orders: Option<String>,
header_icons: Option<Vec<String>>,
) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)>;
async fn get_columns(
&self,
@ -46,6 +48,20 @@ pub trait Pool: Send + Sync {
async fn close(&self);
}
/// Appends the matching sort icon (e.g. "↑1", "↓2") to each column header.
///
/// When `header_icons` is `None` the headers are returned untouched.
/// When an icon is the empty string, trimming leaves the plain header text.
/// Unlike a direct `header_icons[index]` lookup, a missing icon (shorter
/// icon vector) falls back to an empty icon instead of panicking.
fn concat_headers(headers: Vec<String>, header_icons: Option<Vec<String>>) -> Vec<String> {
    match header_icons {
        None => headers,
        Some(icons) => headers
            .iter()
            .enumerate()
            .map(|(index, header)| {
                // `get` avoids an out-of-bounds panic if fewer icons than headers.
                let icon = icons.get(index).map(String::as_str).unwrap_or("");
                format!("{} {}", header, icon).trim().to_string()
            })
            .collect(),
    }
}
pub enum ExecuteResult {
Read {
headers: Vec<String>,
@ -69,3 +85,27 @@ macro_rules! get_or_null {
$value.map_or("NULL".to_string(), |v| v.to_string())
};
}
#[cfg(test)]
mod test {
    use super::concat_headers;

    /// Icons are appended to their matching headers; an empty icon leaves
    /// the header text unchanged (the joined string is trimmed).
    #[test]
    fn test_concat_headers() {
        let headers: Vec<String> = ["ID", "NAME", "TIMESTAMP"]
            .iter()
            .map(|s| s.to_string())
            .collect();
        let icons: Vec<String> = ["", "↑1", "↓2"].iter().map(|s| s.to_string()).collect();

        let result = concat_headers(headers, Some(icons));

        let expected: Vec<String> = ["ID", "NAME ↑1", "TIMESTAMP ↓2"]
            .iter()
            .map(|s| s.to_string())
            .collect();
        assert_eq!(result, expected);
    }
}

@ -1,6 +1,6 @@
use crate::get_or_null;
use super::{ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
use super::{concat_headers, ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
use async_trait::async_trait;
use chrono::{NaiveDate, NaiveDateTime, NaiveTime};
use database_tree::{Child, Database, Table};
@ -228,21 +228,42 @@ impl Pool for MySqlPool {
table: &Table,
page: u16,
filter: Option<String>,
orders: Option<String>,
header_icons: Option<Vec<String>>,
) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)> {
let query = if let Some(filter) = filter {
let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
format!(
"SELECT * FROM `{database}`.`{table}` WHERE {filter} {orders} LIMIT {page}, {limit}",
database = database.name,
table = table.name,
filter = filter,
page = page,
limit = RECORDS_LIMIT_PER_PAGE,
orders = orders
)
} else if let Some(filter) = filter {
format!(
"SELECT * FROM `{database}`.`{table}` WHERE {filter} LIMIT {page}, {limit}",
database = database.name,
table = table.name,
filter = filter,
page = page,
limit = RECORDS_LIMIT_PER_PAGE
limit = RECORDS_LIMIT_PER_PAGE,
)
} else if let Some(orders) = orders {
format!(
"SELECT * FROM `{database}`.`{table}` {orders} LIMIT {page}, {limit}",
database = database.name,
table = table.name,
orders = orders,
page = page,
limit = RECORDS_LIMIT_PER_PAGE,
)
} else {
format!(
"SELECT * FROM `{}`.`{}` LIMIT {page}, {limit}",
database.name,
table.name,
"SELECT * FROM `{database}`.`{table}` LIMIT {page}, {limit}",
database = database.name,
table = table.name,
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
@ -262,7 +283,7 @@ impl Pool for MySqlPool {
}
records.push(new_row)
}
Ok((headers, records))
Ok((concat_headers(headers, header_icons), records))
}
async fn get_columns(

@ -1,6 +1,6 @@
use crate::get_or_null;
use super::{ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
use super::{concat_headers, ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
use async_trait::async_trait;
use chrono::{NaiveDate, NaiveDateTime, NaiveTime};
use database_tree::{Child, Database, Schema, Table};
@ -245,8 +245,21 @@ impl Pool for PostgresPool {
table: &Table,
page: u16,
filter: Option<String>,
orders: Option<String>,
header_icons: Option<Vec<String>>,
) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)> {
let query = if let Some(filter) = filter.as_ref() {
let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
format!(
r#"SELECT * FROM "{database}"."{table_schema}"."{table}" WHERE {filter} {orders} LIMIT {limit} OFFSET {page}"#,
database = database.name,
table = table.name,
filter = filter,
orders = orders,
table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
} else if let Some(filter) = &filter {
format!(
r#"SELECT * FROM "{database}"."{table_schema}"."{table}" WHERE {filter} LIMIT {limit} OFFSET {page}"#,
database = database.name,
@ -256,6 +269,16 @@ impl Pool for PostgresPool {
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
} else if let Some(orders) = &orders {
format!(
r#"SELECT * FROM "{database}"."{table_schema}"."{table}" {orders} LIMIT {limit} OFFSET {page}"#,
database = database.name,
table = table.name,
orders = orders,
table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
} else {
format!(
r#"SELECT * FROM "{database}"."{table_schema}"."{table}" LIMIT {limit} OFFSET {page}"#,
@ -283,8 +306,14 @@ impl Pool for PostgresPool {
Err(_) => {
if json_records.is_none() {
json_records = Some(
self.get_json_records(database, table, page, filter.clone())
.await?,
self.get_json_records(
database,
table,
page,
filter.clone(),
orders.clone(),
)
.await?,
);
}
if let Some(json_records) = &json_records {
@ -315,7 +344,7 @@ impl Pool for PostgresPool {
}
records.push(new_row)
}
Ok((headers, records))
Ok((concat_headers(headers, header_icons), records))
}
async fn get_columns(
@ -479,8 +508,20 @@ impl PostgresPool {
table: &Table,
page: u16,
filter: Option<String>,
orders: Option<String>,
) -> anyhow::Result<Vec<serde_json::Value>> {
let query = if let Some(filter) = filter {
let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
format!(
r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" WHERE {filter} {orders} LIMIT {limit} OFFSET {page}"#,
database = database.name,
table = table.name,
filter = filter,
orders = orders,
table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
} else if let Some(filter) = filter {
format!(
r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" WHERE {filter} LIMIT {limit} OFFSET {page}"#,
database = database.name,
@ -490,6 +531,16 @@ impl PostgresPool {
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
} else if let Some(orders) = orders {
format!(
r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" {orders} LIMIT {limit} OFFSET {page}"#,
database = database.name,
table = table.name,
orders = orders,
table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
} else {
format!(
r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" LIMIT {limit} OFFSET {page}"#,

@ -1,6 +1,6 @@
use crate::get_or_null;
use super::{ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
use super::{concat_headers, ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
use async_trait::async_trait;
use chrono::NaiveDateTime;
use database_tree::{Child, Database, Table};
@ -230,19 +230,38 @@ impl Pool for SqlitePool {
table: &Table,
page: u16,
filter: Option<String>,
orders: Option<String>,
header_icons: Option<Vec<String>>,
) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)> {
let query = if let Some(filter) = filter {
let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
format!(
"SELECT * FROM `{table}` WHERE {filter} {orders} LIMIT {page}, {limit}",
table = table.name,
filter = filter,
page = page,
limit = RECORDS_LIMIT_PER_PAGE,
orders = orders
)
} else if let Some(filter) = filter {
format!(
"SELECT * FROM `{table}` WHERE {filter} LIMIT {page}, {limit}",
table = table.name,
filter = filter,
page = page,
limit = RECORDS_LIMIT_PER_PAGE,
)
} else if let Some(orders) = orders {
format!(
"SELECT * FROM `{table}`{orders} LIMIT {page}, {limit}",
table = table.name,
orders = orders,
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
} else {
format!(
"SELECT * FROM `{}` LIMIT {page}, {limit}",
table.name,
"SELECT * FROM `{table}` LIMIT {page}, {limit}",
table = table.name,
page = page,
limit = RECORDS_LIMIT_PER_PAGE
)
@ -262,7 +281,7 @@ impl Pool for SqlitePool {
}
records.push(new_row)
}
Ok((headers, records))
Ok((concat_headers(headers, header_icons), records))
}
async fn get_columns(

Loading…
Cancel
Save