Query logging with
ProxySQL
Summary
This document explains how to use ProxySQL to log the queries that users execute by
connecting directly to the database.
It covers the following:
- How to install ProxySQL
- How to set up ProxySQL for query logging
- How to convert the binary-format query log to text format
- How to install and set up the ELK stack
Architecture
[Architecture diagram] Users and an admin connect to ProxySQL, which routes queries to
the DB servers and writes a binary (BIN) query log; the log is converted to text (TXT)
and shipped by Filebeat to Logstash, then indexed in Elasticsearch and visualized in
Kibana.
Set up ProxySQL
# rpm -ivh proxysql-2.0.4-1-centos67.x86_64.rpm
# service proxysql start
# mysql -u admin -padmin -h 127.0.0.1 -P6032 --prompt='Admin> '
Install, start, and connect to ProxySQL
https://github.com/sysown/proxysql/releases/tag/v2.0.4
Set up ProxySQL
-- Adding User
insert into mysql_users(username, password, transaction_persistent)
values('test_user','test1234', 0);
load mysql users to runtime;
save mysql users to disk;
-- Adding Server Information
insert into mysql_servers(hostname,hostgroup_id,port,comment)
values('10.xx.xx.01',0,3306,'customerdb');
insert into mysql_servers(hostname,hostgroup_id,port,comment)
values('10.xx.xx.02',1,3306,'productdb');
insert into mysql_servers(hostname,hostgroup_id,port,comment)
values('10.xx.xx.03',2,3306,'orderdb');
load mysql servers to runtime;
save mysql servers to disk;
Set up configuration
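As a quick sanity check (an addition, not in the original slides), the runtime tables on the admin interface show what ProxySQL is actually using after LOAD ... TO RUNTIME:
# mysql -u admin -padmin -h 127.0.0.1 -P6032 -e "SELECT hostgroup_id, hostname, port, comment FROM runtime_mysql_servers;"
# mysql -u admin -padmin -h 127.0.0.1 -P6032 -e "SELECT username, active, default_hostgroup FROM runtime_mysql_users;"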
Set up ProxySQL
-- Enable query logging
set mysql-eventslog_filename='queries.log';
set mysql-eventslog_filesize=1048576;
load mysql variables to runtime;
save mysql variables to disk;
-- Rule 1 logs every query (match_digest '.'); apply=0 so later rules can still match
insert into mysql_query_rules (rule_id, active, match_digest, log, apply)
values (1,1,'.',1,0);
-- Rule 2 replaces newline characters with spaces so multi-line queries log cleanly
insert into mysql_query_rules
(rule_id, active, flagIN, match_pattern, re_modifiers, replace_pattern, apply)
values (2,1,0,'\n','GLOBAL',' ',0);
load mysql query rules to runtime;
save mysql query rules to disk;
Set up configuration
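To confirm the logging rule is actually matching traffic, the hit counters in ProxySQL's stats schema can be checked (a verification step added here, not in the original):
# mysql -u admin -padmin -h 127.0.0.1 -P6032 -e "SELECT rule_id, hits FROM stats_mysql_query_rules;"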
Set up ProxySQL
-- Add query rules for query routing
insert into mysql_query_rules(active,match_pattern, destination_hostgroup,apply)
values(1,'^SELECT .* customerdb',0,1);
insert into mysql_query_rules(active,match_pattern, destination_hostgroup,apply)
values(1,'^SELECT .* productdb',1,1);
insert into mysql_query_rules(active,match_pattern, destination_hostgroup,apply)
values(1,'^SELECT .* orderdb',2,1);
load mysql query rules to runtime;
save mysql query rules to disk;
Set up configuration
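One way to verify the routing (an added check, not from the slides) is the per-digest statistics, which record the hostgroup that served each query:
# mysql -u admin -padmin -h 127.0.0.1 -P6032 -e "SELECT hostgroup, schemaname, digest_text, count_star FROM stats_mysql_query_digest ORDER BY count_star DESC LIMIT 10;"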
Set up ProxySQL
-- Check that you can connect to the DB through ProxySQL (port 6033)
# mysql -utest_user -p'test1234' -h10.xx.xx.001 -P6033 -e"SELECT * from customerdb.member limit 10"
# mysql -utest_user -p'test1234' -h10.xx.xx.001 -P6033 -e"SELECT * from orderdb.order limit 10"
-- A query log file (queries.log.0000000x) appears in the data directory
# cd /var/lib/proxysql
# ls -al
total 7876
drwxr-xr-x 4 proxysql proxysql 4096 Jun 20 14:47 .
drwxr-xr-x. 27 root root 4096 Jun 19 18:26 ..
-rw------- 1 proxysql proxysql 2177193 Jun 20 14:47 proxysql.log
-rw-r--r-- 1 proxysql proxysql 6 Jun 20 11:18 proxysql.pid
-rw------- 1 proxysql proxysql 2781184 Jun 20 14:47 proxysql_stats.db
-rw------- 1 proxysql proxysql 89474 Jun 20 14:19 queries.log.00000001
Check query logging
Convert query log
ProxySQL writes the query log in a binary format, so we need to convert it to text.
ProxySQL ships a tool that converts the binary log file to text format
(eventslog_reader_sample).
# git clone https://github.com/sysown/proxysql.git
# cd proxysql/tools
# ll
total 128
-rwxr-xr-x 1 root root 2896 Jun 11 10:28 check_variables.pl
-rw-r--r-- 1 root root 4527 Jun 11 10:28 eventslog_reader_sample.cpp
-rw-r--r-- 1 root root 122 Jun 11 10:28 Makefile
-rwxr-xr-x 1 root root 16869 Jun 11 10:28 proxysql_galera_checker.sh
-rwxr-xr-x 1 root root 3574 Jun 11 10:28 proxysql_galera_writer.pl
Convert query log
Compile eventslog_reader_sample.cpp; the resulting binary converts a binary query
log file to text format.
# make
# ll
total 128
-rwxr-xr-x 1 root root 2896 Jun 11 10:28 check_variables.pl
-rwxr-xr-x 1 root root 87168 Jun 11 10:41 eventslog_reader_sample
-rw-r--r-- 1 root root 4527 Jun 11 10:28 eventslog_reader_sample.cpp
-rw-r--r-- 1 root root 122 Jun 11 10:28 Makefile
-rwxr-xr-x 1 root root 16869 Jun 11 10:28 proxysql_galera_checker.sh
-rwxr-xr-x 1 root root 3574 Jun 11 10:28 proxysql_galera_writer.pl
# ./eventslog_reader_sample /var/lib/proxysql/queries.log.00000001
or
# ./eventslog_reader_sample /var/lib/proxysql/queries.log.00000001 > result.txt
Convert query log
Here are the two forms of the query log, raw binary and converted text.
# tail -n 10 queries.log.00000001
��:�L14SHOW SESSION VARIABLES LIKE 'lower_case_table_names'w�� test_userinformation_schema10.xxx.xxx.x:1475310.xx.xx.xx:3306�KSw/���RSELECT current_user()x��
test_userinformation_schema10.xxx.xxx.xx:1475310.xx.xx.xx:3306��cw�X��Efw�X��
��l��SET CHARACTER SET utf8g�� test_userinformation_schema10.xxx.xxx.xxx:14753�����������pw�X���pw�X����8���Q0.xxx.x:14753x��
test_userinformation_schema10.xxx.xxx.x:1475310.xx.xx.xx:3306�K~w�X���w�X���$�f��SET SQL_SAFE_UPDATES=1o��
test_userinformation_schema10.xxx.xxx.x:14753����������‫ۏ‬w�X��‫ۏ‬w�X��yS��g�H�0.xxx.x:14753ION_ID()���
test_userinformation_schema10.xxx.xxx.x:1475310.xx.xx.xx:3306���w�X��h�w�X��Y�Z�~a��%SHOWorderdb10.xxx.xxx.x:14753����������ӹw�X��ӹw�X��0�o�/0.xxx.x:14753=1
# tail -n 10 queries_result.log.00000001
ProxySQL LOG QUERY: thread_id="30" username="test_user" schemaname="information_schema" client="10.xxx.xxx.x:63285" HID=0 server="10.xx.xx.xxx:3306" starttime="2019-06-20 17:35:20.108808" endtime="2019-06-20
17:35:20.109038" duration=230us digest="0xD8CCF296B01AC933"
select *
from orderdb.order
limit 105
ProxySQL LOG QUERY: thread_id="30" username="test_user" schemaname="information_schema" client="10.xxx.xxx.x:63285" HID=0 server="10.xx.xx.xxx:3306" starttime="2019-06-20 17:35:21.300584" endtime="2019-06-20
17:35:21.300738" duration=154us digest="0xD8CCF296B01AC933"
select *
from customerdb.custom
limit 100
Make a cron job for converting query log files
Query log files are created sequentially, so we need a script that converts each new
binary log file to text format whenever one appears; see the sketch after the file
mapping below.
queries.log.00000001 (BIN) -> ./result/queries_result.log.00000001 (TXT)
queries.log.00000002 (BIN) -> ./result/queries_result.log.00000002 (TXT)
queries.log.00000003 (BIN) -> ./result/queries_result.log.00000003 (TXT)
./eventslog_reader_sample queries.log.00000001 > ./result/queries_result.log.00000001
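A minimal sketch of such a script, assuming the directory layout above (the script name, reader path, and cron schedule are illustrative, not from the slides):

#!/bin/bash
# convert_query_logs.sh -- hypothetical helper for the cron job described above.
# Converts every binary query log that has no text counterpart yet.
LOG_DIR=/var/lib/proxysql
OUT_DIR=$LOG_DIR/result
READER=/root/proxysql/tools/eventslog_reader_sample   # adjust to where you built it

mkdir -p "$OUT_DIR"
for f in "$LOG_DIR"/queries.log.*; do
    [ -e "$f" ] || continue                  # no log files yet
    seq=${f##*.}                             # e.g. 00000001
    out="$OUT_DIR/queries_result.log.$seq"
    # Skip files already converted. Note: ProxySQL may still be appending to
    # the newest file, so a production script should re-convert the last file
    # on each run or only convert rotated files.
    [ -f "$out" ] || "$READER" "$f" > "$out"
done

Registered in cron, e.g.: * * * * * /usr/local/bin/convert_query_logs.sh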
How users connect with a workbench tool
Users who used to connect directly to the DB can connect through ProxySQL in the same
way with MySQL Workbench. They must qualify table names with the database (schema)
name in queries
(e.g. orderdb.order).
Set up Filebeat
Download
# tar -xzvf filebeat-7.1.1-linux-x86_64.tar.gz -C /opt/
# vi filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/lib/proxysql/result/queries_result.log.*
  multiline.pattern: '^ProxySQL'
  multiline.negate: true
  multiline.match: after

output.logstash:
  hosts: ["localhost:5044"]
https://www.elastic.co/kr/downloads/beats/filebeat
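With multiline.negate: true and multiline.match: after, every line that does not start with 'ProxySQL' is appended to the preceding matching line, so each log entry (the header line plus the query text under it) is shipped as a single event.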
Set up Logstash
Download
# tar -xzvf logstash-7.1.1.tar.gz -C /opt/
# cd /opt/logstash-7.1.1/config
# vi logstash.conf
input {
  beats {
    port => 5044
  }
}
--- continued on the next slide
https://www.elastic.co/kr/downloads/logstash
Set up Logstash
# vi logstash.conf
filter {
  mutate {
    gsub => ["message","\"",""]
  }
  mutate {
    gsub => ["message",". ",".0"]
  }
  grok {
    match => { "message" => "ProxySQL LOG QUERY: thread_id=%{GREEDYDATA:thread_id} username=%{GREEDYDATA:username} schemaname=%{GREEDYDATA:schemaname} client=%{GREEDYDATA:client} HID=%{GREEDYDATA:HID} server=%{GREEDYDATA:server} starttime=%{TIMESTAMP_ISO8601:starttime} endtime=%{TIMESTAMP_ISO8601:endtime} duration=%{GREEDYDATA:duration} digest=%{DATA:digest}\n%{GREEDYDATA:query}" }
  }
}
--- continued on the next slide
Set up Logstash
# vi logstash.conf
output {
  elasticsearch {
    hosts => ["http://[elasticsearch_server_ip]:9200"]
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}
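Before starting the pipeline, the configuration can be syntax-checked with Logstash's built-in test flag (a convenience step added here; run from /opt/logstash-7.1.1):
# bin/logstash -f config/logstash.conf --config.test_and_exit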
Set up Elasticsearch
Download
# tar -xzvf elasticsearch-7.1.1-linux-x86_64.tar.gz -C /opt/
# cd /opt/elasticsearch-7.1.1/config
# vi elasticsearch.yml
cluster.name: db-admin
node.name: first-node
path.data: /elasticsearch/data
path.logs: /elasticsearch/log
bootstrap.system_call_filter: false
network.host: [elasticsearch_server_ip]
http.port: 9200
cluster.initial_master_nodes: "first-node"
https://www.elastic.co/kr/downloads/elasticsearch
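After Elasticsearch is started (see the start commands below), a quick cluster health check, added here for convenience:
# curl 'http://[elasticsearch_server_ip]:9200/_cluster/health?pretty'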
Set up Kibana
Download
# tar -xzvf kibana-7.1.1-linux-x86_64.tar.gz -C /opt/
# cd /opt/kibana-7.1.1-linux-x86_64/config
# vi kibana.yml
server.host: "[kibana_server_ip]"
elasticsearch.hosts: ["http://[elasticsearch_server_ip]:9200"]
https://www.elastic.co/kr/downloads/kibana
-- Start Filebeat
# cd /opt/filebeat-7.1.1-linux-x86_64
# ./filebeat -e -c filebeat.yml
-- Start Logstash
# cd /opt/logstash-7.1.1
# bin/logstash -f config/logstash.conf
-- Start Elasticsearch
# cd /opt/elasticsearch-7.1.1
# bin/elasticsearch
-- Start Kibana
# cd /opt/kibana-7.1.1-linux-x86_64
# bin/kibana
Start ELK stack
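Note (an addition to the slides): start Elasticsearch first and Filebeat last, so each component's downstream endpoint is already listening when it starts. To keep a process running after logout, a common pattern is:
# nohup bin/elasticsearch > /dev/null 2>&1 &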
Kibana Web
Confirm the query logs in the Kibana web UI:
http://kibana_server_ip:5601
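To browse the shipped logs, create an index pattern matching filebeat-* (Kibana > Management > Index Patterns); the fields parsed by the grok filter (username, schemaname, duration, query, ...) are then searchable in Discover.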
Reference
https://github.com/sysown/proxysql/wiki/Query-Logging
https://github.com/sysown/proxysql/issues/871
Thank You