From f4e7877c6e7e22076e7eff419a9198683a49d0e8 Mon Sep 17 00:00:00 2001
From: kyoto7250 <50972773+kyoto7250@users.noreply.github.com>
Date: Mon, 11 Apr 2022 11:48:01 +0900
Subject: [PATCH] impl for passing order and header_icons

---
 src/database/mod.rs      | 40 +++++++++++++++++++++++++
 src/database/mysql.rs    | 35 +++++++++++++++++-----
 src/database/postgres.rs | 63 ++++++++++++++++++++++++++++++++++++----
 src/database/sqlite.rs   | 29 ++++++++++++++----
 4 files changed, 149 insertions(+), 18 deletions(-)

diff --git a/src/database/mod.rs b/src/database/mod.rs
index 46a771a..7cad19f 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -22,6 +22,8 @@ pub trait Pool: Send + Sync {
         table: &Table,
         page: u16,
         filter: Option<String>,
+        orders: Option<String>,
+        header_icons: Option<Vec<String>>,
     ) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)>;
     async fn get_columns(
         &self,
@@ -46,6 +48,20 @@ pub trait Pool: Send + Sync {
     async fn close(&self);
 }
 
+fn concat_headers(headers: Vec<String>, header_icons: Option<Vec<String>>) -> Vec<String> {
+    if let Some(header_icons) = &header_icons {
+        let mut new_headers = vec![String::new(); headers.len()];
+        for (index, header) in headers.iter().enumerate() {
+            new_headers[index] = format!("{} {}", header, header_icons[index])
+                .trim()
+                .to_string();
+        }
+        return new_headers;
+    } else {
+        return headers;
+    }
+}
+
 pub enum ExecuteResult {
     Read {
         headers: Vec<String>,
@@ -69,3 +85,27 @@ macro_rules! get_or_null {
         $value.map_or("NULL".to_string(), |v| v.to_string())
     };
 }
+
+#[cfg(test)]
+mod test {
+    use super::concat_headers;
+    #[test]
+    fn test_concat_headers() {
+        let headers = vec![
+            "ID".to_string(),
+            "NAME".to_string(),
+            "TIMESTAMP".to_string(),
+        ];
+        let header_icons = vec!["".to_string(), "↑1".to_string(), "↓2".to_string()];
+        let concat_headers: Vec<String> = concat_headers(headers, Some(header_icons));
+
+        assert_eq!(
+            concat_headers,
+            vec![
+                "ID".to_string(),
+                "NAME ↑1".to_string(),
+                "TIMESTAMP ↓2".to_string()
+            ]
+        )
+    }
+}
diff --git a/src/database/mysql.rs b/src/database/mysql.rs
index 160306f..c810cdd 100644
--- a/src/database/mysql.rs
+++ b/src/database/mysql.rs
@@ -1,6 +1,6 @@
 use crate::get_or_null;
 
-use super::{ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
+use super::{concat_headers, ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
 use async_trait::async_trait;
 use chrono::{NaiveDate, NaiveDateTime, NaiveTime};
 use database_tree::{Child, Database, Table};
@@ -228,21 +228,42 @@ impl Pool for MySqlPool {
         table: &Table,
         page: u16,
         filter: Option<String>,
+        orders: Option<String>,
+        header_icons: Option<Vec<String>>,
     ) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)> {
-        let query = if let Some(filter) = filter {
+        let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
+            format!(
+                "SELECT * FROM `{database}`.`{table}` WHERE {filter} {orders} LIMIT {page}, {limit}",
+                database = database.name,
+                table = table.name,
+                filter = filter,
+                page = page,
+                limit = RECORDS_LIMIT_PER_PAGE,
+                orders = orders
+            )
+        } else if let Some(filter) = filter {
             format!(
                 "SELECT * FROM `{database}`.`{table}` WHERE {filter} LIMIT {page}, {limit}",
                 database = database.name,
                 table = table.name,
                 filter = filter,
                 page = page,
-                limit = RECORDS_LIMIT_PER_PAGE
+                limit = RECORDS_LIMIT_PER_PAGE,
+            )
+        } else if let Some(orders) = orders {
+            format!(
+                "SELECT * FROM `{database}`.`{table}` {orders} LIMIT {page}, {limit}",
+                database = database.name,
+                table = table.name,
+                orders = orders,
+                page = page,
+                limit = RECORDS_LIMIT_PER_PAGE,
             )
         } else {
             format!(
-                "SELECT * FROM `{}`.`{}` LIMIT {page}, {limit}",
-                database.name,
-                table.name,
+                "SELECT * FROM `{database}`.`{table}` LIMIT {page}, {limit}",
+                database = database.name,
+                table = table.name,
                 page = page,
                 limit = RECORDS_LIMIT_PER_PAGE
             )
@@ -262,7 +283,7 @@ impl Pool for MySqlPool {
             }
             records.push(new_row)
         }
-        Ok((headers, records))
+        Ok((concat_headers(headers, header_icons), records))
     }
 
     async fn get_columns(
diff --git a/src/database/postgres.rs b/src/database/postgres.rs
index ab954b0..78b6bc0 100644
--- a/src/database/postgres.rs
+++ b/src/database/postgres.rs
@@ -1,6 +1,6 @@
 use crate::get_or_null;
 
-use super::{ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
+use super::{concat_headers, ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
 use async_trait::async_trait;
 use chrono::{NaiveDate, NaiveDateTime, NaiveTime};
 use database_tree::{Child, Database, Schema, Table};
@@ -245,8 +245,21 @@ impl Pool for PostgresPool {
         table: &Table,
         page: u16,
         filter: Option<String>,
+        orders: Option<String>,
+        header_icons: Option<Vec<String>>,
     ) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)> {
-        let query = if let Some(filter) = filter.as_ref() {
+        let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
+            format!(
+                r#"SELECT * FROM "{database}"."{table_schema}"."{table}" WHERE {filter} {orders} LIMIT {limit} OFFSET {page}"#,
+                database = database.name,
+                table = table.name,
+                filter = filter,
+                orders = orders,
+                table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
+                page = page,
+                limit = RECORDS_LIMIT_PER_PAGE
+            )
+        } else if let Some(filter) = &filter {
             format!(
                 r#"SELECT * FROM "{database}"."{table_schema}"."{table}" WHERE {filter} LIMIT {limit} OFFSET {page}"#,
                 database = database.name,
@@ -256,6 +269,16 @@ impl Pool for PostgresPool {
                 page = page,
                 limit = RECORDS_LIMIT_PER_PAGE
             )
+        } else if let Some(orders) = &orders {
+            format!(
+                r#"SELECT * FROM "{database}"."{table_schema}"."{table}" {orders} LIMIT {limit} OFFSET {page}"#,
+                database = database.name,
+                table = table.name,
+                orders = orders,
+                table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
+                page = page,
+                limit = RECORDS_LIMIT_PER_PAGE
+            )
         } else {
             format!(
                 r#"SELECT * FROM "{database}"."{table_schema}"."{table}" LIMIT {limit} OFFSET {page}"#,
@@ -283,8 +306,14 @@ impl Pool for PostgresPool {
                 Err(_) => {
                     if json_records.is_none() {
                         json_records = Some(
-                            self.get_json_records(database, table, page, filter.clone())
-                                .await?,
+                            self.get_json_records(
+                                database,
+                                table,
+                                page,
+                                filter.clone(),
+                                orders.clone(),
+                            )
+                            .await?,
                         );
                     }
                     if let Some(json_records) = &json_records {
@@ -315,7 +344,7 @@ impl Pool for PostgresPool {
             }
             records.push(new_row)
         }
-        Ok((headers, records))
+        Ok((concat_headers(headers, header_icons), records))
     }
 
     async fn get_columns(
@@ -479,8 +508,20 @@ impl PostgresPool {
         table: &Table,
         page: u16,
         filter: Option<String>,
+        orders: Option<String>,
     ) -> anyhow::Result<Vec<serde_json::Value>> {
-        let query = if let Some(filter) = filter {
+        let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
+            format!(
+                r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" WHERE {filter} {orders} LIMIT {limit} OFFSET {page}"#,
+                database = database.name,
+                table = table.name,
+                filter = filter,
+                orders = orders,
+                table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
+                page = page,
+                limit = RECORDS_LIMIT_PER_PAGE
+            )
+        } else if let Some(filter) = filter {
             format!(
                 r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" WHERE {filter} LIMIT {limit} OFFSET {page}"#,
                 database = database.name,
@@ -490,6 +531,16 @@ impl PostgresPool {
                 page = page,
                 limit = RECORDS_LIMIT_PER_PAGE
             )
+        } else if let Some(orders) = orders {
+            format!(
+                r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" {orders} LIMIT {limit} OFFSET {page}"#,
+                database = database.name,
+                table = table.name,
+                orders = orders,
+                table_schema = table.schema.clone().unwrap_or_else(|| "public".to_string()),
+                page = page,
+                limit = RECORDS_LIMIT_PER_PAGE
+            )
         } else {
             format!(
                 r#"SELECT to_json("{table}".*) FROM "{database}"."{table_schema}"."{table}" LIMIT {limit} OFFSET {page}"#,
diff --git a/src/database/sqlite.rs b/src/database/sqlite.rs
index 83063c7..166f069 100644
--- a/src/database/sqlite.rs
+++ b/src/database/sqlite.rs
@@ -1,6 +1,6 @@
 use crate::get_or_null;
 
-use super::{ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
+use super::{concat_headers, ExecuteResult, Pool, TableRow, RECORDS_LIMIT_PER_PAGE};
 use async_trait::async_trait;
 use chrono::NaiveDateTime;
 use database_tree::{Child, Database, Table};
@@ -230,19 +230,38 @@ impl Pool for SqlitePool {
         table: &Table,
         page: u16,
         filter: Option<String>,
+        orders: Option<String>,
+        header_icons: Option<Vec<String>>,
     ) -> anyhow::Result<(Vec<String>, Vec<Vec<String>>)> {
-        let query = if let Some(filter) = filter {
+        let query = if let (Some(filter), Some(orders)) = (&filter, &orders) {
+            format!(
+                "SELECT * FROM `{table}` WHERE {filter} {orders} LIMIT {page}, {limit}",
+                table = table.name,
+                filter = filter,
+                page = page,
+                limit = RECORDS_LIMIT_PER_PAGE,
+                orders = orders
+            )
+        } else if let Some(filter) = filter {
             format!(
                 "SELECT * FROM `{table}` WHERE {filter} LIMIT {page}, {limit}",
                 table = table.name,
                 filter = filter,
                 page = page,
+                limit = RECORDS_LIMIT_PER_PAGE,
+            )
+        } else if let Some(orders) = orders {
+            format!(
+                "SELECT * FROM `{table}`{orders} LIMIT {page}, {limit}",
+                table = table.name,
+                orders = orders,
+                page = page,
                 limit = RECORDS_LIMIT_PER_PAGE
             )
         } else {
             format!(
-                "SELECT * FROM `{}` LIMIT {page}, {limit}",
-                table.name,
+                "SELECT * FROM `{table}` LIMIT {page}, {limit}",
+                table = table.name,
                 page = page,
                 limit = RECORDS_LIMIT_PER_PAGE
             )
@@ -262,7 +281,7 @@ impl Pool for SqlitePool {
             }
             records.push(new_row)
         }
-        Ok((headers, records))
+        Ok((concat_headers(headers, header_icons), records))
     }
 
     async fn get_columns(
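
A minimal usage sketch for the extended `get_records` signature, assuming a caller that already holds a `Pool` implementation (`pool`), a `Database`, and a `Table` from the application state; the column names and the ORDER BY clause below are illustrative, and the indicator strings follow the `test_concat_headers` test in src/database/mod.rs. Note that `header_icons` must supply one entry per returned column (an empty string for unsorted columns), since `concat_headers` indexes it positionally.

    // Hypothetical call site (not part of this patch): `pool`, `database`, and
    // `table` are assumed to come from the surrounding application state.
    let orders = Some("ORDER BY name ASC, created_at DESC".to_string());
    let header_icons = Some(vec![
        String::new(),     // id: no sort indicator
        "↑1".to_string(),  // name: primary sort, ascending
        "↓2".to_string(),  // created_at: secondary sort, descending
    ]);
    let (headers, records) = pool
        .get_records(&database, &table, 0, None, orders, header_icons)
        .await?;
    // `headers` now carries the indicators, e.g. ["id", "name ↑1", "created_at ↓2"].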