Cloudflare
Disable IPv6

```bash
curl -X PATCH "https://api.cloudflare.com/client/v4/zones/cfa068f2a1e282a2731ba29a0a24b052/settings/ipv6" \
  -H "X-Auth-Email: [email protected]" \
  -H "X-Auth-Key: 830419c40277b1cff929397d7c08b2012c2a" \
  -H "Content-Type: application/json" \
  --data '{"value":"off"}'
```

Zone ID used in the URL:

X-Auth-Key:
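To confirm the change took effect, the same settings endpoint can be read back with a GET. This is a sketch mirroring the PATCH above (same zone ID and auth headers); the response should contain `"value":"off"` once IPv6 is disabled.

```bash
curl -X GET "https://api.cloudflare.com/client/v4/zones/cfa068f2a1e282a2731ba29a0a24b052/settings/ipv6" \
  -H "X-Auth-Email: [email protected]" \
  -H "X-Auth-Key: 830419c40277b1cff929397d7c08b2012c2a" \
  -H "Content-Type: application/json"
```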
Harbor
Download the offline installer

```bash
# extract
tar -zxvf harbor.xxxxx.tgz
# copy the template config
cp ./harbor.yml.tmpl ./harbor.yml
```

Edit the config file

```bash
sudo vim harbor.yml
```

```yaml
# Use a domain name if you have one; on an internal network the IP is fine
hostname: 10.0.2.4

# http related config
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 80

# HTTPS is not needed on the internal network, so the https/certificate section is commented out
# https related config
#https:
  # https port for harbor, default is 443
  # port: 443
  # The path of cert and key files for nginx
  # certificate: /your/certificate/path
  # private_key: /your/private/key/path

# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
# external_url: https://reg.mydomain.com:8433

# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: 123456

# Harbor DB configuration
database:
  # The password for the root user of Harbor DB. Change this before any production use.
  password: root123
  # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
  max_idle_conns: 50
  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 100 for postgres.
  max_open_conns: 100

# The default data volume
# make sure this directory exists and is correct
data_volume: /home/yunxian/harbor/data
```

Run

```bash
# run this after every change to the config file
./prepare
# install and start
./install.sh
```

Access: http://10.0.2.4/ with admin / 123456.

Fixing the HTTPS problem on clients

```
[root@client ~]# docker login 10.19.46.15
Username: admin
Password:
Error response from daemon: Get https://10.19.46.15/v2/: dial tcp 10.19.46.15:443: connect: connection refused
```

Since Docker 1.3.2 the docker client talks to registries over HTTPS by default, while Harbor here is configured for plain HTTP, so docker login/pull/push against this registry fails with the error above.

Fix: vim docker-compose.yml

Edit the client:

```bash
vim /etc/docker/daemon.json
```

```json
{
  "insecure-registries": [
    "10.19.46.15"
  ]
}
```

(10.19.46.15 is Harbor's IP.)

```bash
# restart docker
systemctl daemon-reload
systemctl restart docker
```

Log in again

```
[root@client ~]# docker login 10.19.46.15
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
```

Tips: the pull command, and the commands for tagging and pushing (see the sketch below).
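A sketch of what those tag/push/pull commands look like against this Harbor instance; the project name `library` and the `nginx` image are placeholders, not from the original notes.

```bash
# tag a local image with the registry address and a project that exists in Harbor
docker tag nginx:latest 10.0.2.4/library/nginx:latest
# push it to Harbor
docker push 10.0.2.4/library/nginx:latest
# pull it from any client that has run docker login against the registry
docker pull 10.0.2.4/library/nginx:latest
```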
C++
Basics

Comments — add notes and explanations in the code so that you or other programmers can read it more easily.

```cpp
// single-line comment
/* multi-line comment */
```

Note: the compiler ignores comments when compiling.

Variables — give a piece of memory a name so it is easy to operate on.

Syntax: `data_type variable_name = initial_value;`

```cpp
#include <iostream>
using namespace std;

int main() {
    // variable definition
    // syntax: data_type variable_name = initial_value
    int a = 10;
    cout << "a = " << a << endl;

    system("pause");
    return 0;
}
```

Note: always give a variable an initial value when you create it; reading an uninitialized variable gives unpredictable results.

Constants — record data in the program that must not change.

C++ has two ways to define constants:

- `#define` macro constant: `#define NAME value`, usually placed at the top of the file.
- `const`-qualified variable: `const type name = value;` — the const keyword before the definition marks the variable as read-only.

```cpp
// 1. macro constant
#define day 7

int main() {
    cout << "There are " << day << " days in a week" << endl;
    //day = 8; // error: a macro constant cannot be modified

    // 2. const-qualified variable
    const int month = 12;
    cout << "There are " << month << " months in a year" << endl;
    //month = 24; // error: a constant cannot be modified

    system("pause");
    return 0;
}
```

Identifier naming rules:

- an identifier cannot be a keyword
- identifiers may only contain letters, digits and underscores
- the first character must be a letter or an underscore
- identifiers are case-sensitive

sizeof — reports how much memory a data type occupies.

Syntax: `sizeof(type / variable)`

```cpp
int main() {
    cout << "short occupies:     " << sizeof(short) << endl;
    cout << "int occupies:       " << sizeof(int) << endl;
    cout << "long occupies:      " << sizeof(long) << endl;
    cout << "long long occupies: " << sizeof(long long) << endl;
    system("pause");
    return 0;
}
```

Integer types — conclusion: sizeof(short) <= sizeof(int) <= sizeof(long) <= sizeof(long long).

Floating-point types — represent decimals. There are two kinds: single precision float and double precision double; they differ in how many significant digits they can represent.

```cpp
int main() {
    float f1 = 3.14f;
    double d1 = 3.14;
    cout << f1 << endl;
    cout << d1 << endl;
    cout << "float sizeof  = " << sizeof(f1) << endl;
    cout << "double sizeof = " << sizeof(d1) << endl;

    // scientific notation
    float f2 = 3e2;   // 3 * 10 ^ 2
    cout << "f2 = " << f2 << endl;
    float f3 = 3e-2;  // 3 * 10 ^ (-2)
    cout << "f3 = " << f3 << endl;

    system("pause");
    return 0;
}
```

Strings — represent a sequence of characters. Two styles:

C-style string: `char name[] = "value";`

```cpp
int main() {
    char str1[] = "hello world";
    cout << str1 << endl;
    system("pause");
    return 0;
}
```

Note: C-style strings must be wrapped in double quotes.

C++-style string: `string name = "value";`

```cpp
int main() {
    string str = "hello world";
    cout << str << endl;
    system("pause");
    return 0;
}
```

Note: the C++-style string needs the header #include <string>.

Boolean type — represents a true or false value. bool has only two values: true (stored as 1) and false (stored as 0).

```cpp
int main() {
    bool flag = true;
    cout << flag << endl;  // 1
    flag = false;
    cout << flag << endl;  // 0
    cout << "size of bool = " << sizeof(bool) << endl; // 1
    system("pause");
    return 0;
}
```

Input — read data from the keyboard. Keyword: cin. Syntax: `cin >> variable`

```cpp
int main() {
    // integer input
    int a = 0;
    cout << "Enter an integer:" << endl;
    cin >> a;
    cout << a << endl;

    // floating-point input
    double d = 0;
    cout << "Enter a floating-point number:" << endl;
    cin >> d;
    cout << d << endl;

    // character input
    char ch = 0;
    cout << "Enter a character:" << endl;
    cin >> ch;
    cout << ch << endl;

    // string input
    string str;
    cout << "Enter a string:" << endl;
    cin >> str;
    cout << str << endl;

    // boolean input
    bool flag = true;
    cout << "Enter a boolean:" << endl;
    cin >> flag;
    cout << flag << endl;

    system("pause");
    return EXIT_SUCCESS;
}
```

if statements

```cpp
int main() {
    // single-branch if
    // read a score; above 600 counts as getting into a first-tier university
    int score = 0;
    cout << "Enter a score:" << endl;
    cin >> score;
    cout << "The score you entered is: " << score << endl;

    // note: do not put a semicolon after the if condition
    if (score > 600) {
        cout << "I got into a first-tier university!!!" << endl;
    }

    system("pause");
    return 0;
}
```

```cpp
int main() {
    int score = 0;
    cout << "Enter an exam score:" << endl;
    cin >> score;

    if (score > 600) {
        cout << "I got into a first-tier university" << endl;
    } else {
        cout << "I did not get into a first-tier university" << endl;
    }

    system("pause");
    return 0;
}
```

```cpp
int main() {
    int score = 0;
    cout << "Enter an exam score:" << endl;
    cin >> score;

    if (score > 600) {
        cout << "I got into a first-tier university" << endl;
    } else if (score > 500) {
        cout << "I got into a second-tier university" << endl;
    } else if (score > 400) {
        cout << "I got into a third-tier university" << endl;
    } else {
        cout << "I did not get into university" << endl;
    }

    system("pause");
    return 0;
}
```

switch statement

```cpp
int main() {
    // rate a movie
    // 10 ~ 9  classic
    //  8 ~ 7  very good
    //  6 ~ 5  average
    //  below 5  bad
    int score = 0;
    cout << "Rate the movie" << endl;
    cin >> score;

    switch (score) {
    case 10:
    case 9:
        cout << "classic" << endl;
        break;
    case 8:
        cout << "very good" << endl;
        break;
    case 7:
    case 6:
        cout << "average" << endl;
        break;
    default:
        cout << "bad" << endl;
        break;
    }

    system("pause");
    return 0;
}
```

while loop

```cpp
int main() {
    int num = 0;
    while (num < 10) {
        cout << "num = " << num << endl;
        num++;
    }
    system("pause");
    return 0;
}
```

do…while

```cpp
int main() {
    int num = 0;
    do {
        cout << num << endl;
        num++;
    } while (num < 10);
    system("pause");
    return 0;
}
```

for

```cpp
int main() {
    for (int i = 0; i < 10; i++) {
        cout << i << endl;
    }
    system("pause");
    return 0;
}
```

break

```cpp
int main() {
    // break inside nested loops only exits the inner loop
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
            if (j == 5) {
                break;
            }
            cout << "*" << " ";
        }
        cout << endl;
    }
    system("pause");
    return 0;
}
```

continue

```cpp
int main() {
    for (int i = 0; i < 100; i++) {
        if (i % 2 == 0) {
            continue;
        }
        cout << i << endl;
    }
    system("pause");
    return 0;
}
```

goto

```cpp
int main() {
    cout << "1" << endl;
    goto FLAG;
    cout << "2" << endl;
    cout << "3" << endl;
    cout << "4" << endl;
FLAG:
    cout << "5" << endl;
    system("pause");
    return 0;
}
```

One-dimensional arrays

```cpp
int main() {
    // definition 1
    // type name[count];
    int score[10];
    // assign by index
    score[0] = 100;
    score[1] = 99;
    score[2] = 85;
    // print by index
    cout << score[0] << endl;
    cout << score[1] << endl;
    cout << score[2] << endl;

    // definition 2
    // type name[count] = { v1, v2, v3 ... };
    // if fewer than 10 values are given, the rest are filled with 0
    int score2[10] = { 100, 90, 80, 70, 60, 50, 40, 30, 20, 10 };
    // printing one element at a time is tedious, so loop instead
    for (int i = 0; i < 10; i++) {
        cout << score2[i] << endl;
    }

    // definition 3
    // type name[] = { v1, v2, v3 ... };
    int score3[] = { 100, 90, 80, 70, 60, 50, 40, 30, 20, 10 };
    for (int i = 0; i < 10; i++) {
        cout << score3[i] << endl;
    }

    // uses of the array name
    int arr[10] = { 1,2,3,4,5,6,7,8,9,10 };
    cout << "the whole array occupies: " << sizeof(arr) << endl;
    cout << "each element occupies:    " << sizeof(arr[0]) << endl;
    cout << "number of elements:       " << sizeof(arr) / sizeof(arr[0]) << endl;

    system("pause");
    return 0;
}
```

Functions

```cpp
// function definition
int add(int num1, int num2) // num1 and num2 here are the formal parameters
{
    int sum = num1 + num2;
    return sum;
}

int main() {
    int a = 10;
    int b = 10;
    // call add; a and b here are the actual arguments
    int sum = add(a, b);
    cout << "sum = " << sum << endl;

    a = 100;
    b = 100;
    sum = add(a, b);
    cout << "sum = " << sum << endl;

    system("pause");
    return 0;
}
```

Pass by value

```cpp
void swap(int num1, int num2)
{
    cout << "before swap:" << endl;
    cout << "num1 = " << num1 << endl;
    cout << "num2 = " << num2 << endl;

    int temp = num1;
    num1 = num2;
    num2 = temp;

    cout << "after swap:" << endl;
    cout << "num1 = " << num1 << endl;
    cout << "num2 = " << num2 << endl;
    //return; // a function declared without a return value may omit return
}

int main() {
    int a = 10;
    int b = 20;
    swap(a, b);
    cout << "a in main = " << a << endl;
    cout << "b in main = " << b << endl;
    system("pause");
    return 0;
}
```

Function declarations — a declaration tells the compiler the function's name and how to call it; the actual body can be defined separately. A function may be declared many times, but defined only once.

```cpp
// declared many times, defined only once
// declarations
int max(int a, int b);
int max(int a, int b);

// definition
int max(int a, int b)
{
    return a > b ? a : b;
}

int main() {
    int a = 100;
    int b = 200;
    cout << max(a, b) << endl;
    system("pause");
    return 0;
}
```

Splitting functions into separate files — usually four steps:

1. create a header file with the .h suffix
2. create a source file with the .cpp suffix
3. put the function declaration in the header
4. put the function definition in the source file

```cpp
// swap.h
#include <iostream>
using namespace std;

// declaration of a function that swaps two numbers
void swap(int a, int b);
```

```cpp
// swap.cpp
#include "swap.h"

void swap(int a, int b)
{
    int temp = a;
    a = b;
    b = temp;
    cout << "a = " << a << endl;
    cout << "b = " << b << endl;
}
```

```cpp
// main.cpp
#include "swap.h"

int main() {
    int a = 100;
    int b = 200;
    swap(a, b);
    system("pause");
    return 0;
}
```

Pointers — definition and use

```cpp
int main() {
    // 1. defining a pointer
    int a = 10;       // an int variable
    // pointer syntax: type * name;
    int * p;
    // point p at a's address
    p = &a;
    cout << &a << endl; // print a's address
    cout << p << endl;  // print the pointer variable

    // 2. using a pointer
    // dereference with * to access the memory the pointer points to
    cout << "*p = " << *p << endl;

    system("pause");
    return 0;
}
```

Difference between a pointer and an ordinary variable:

- an ordinary variable stores data; a pointer variable stores an address
- a pointer can operate on the memory it points to through the * operator; this is called dereferencing

Summary 1: the & operator gives a variable's address. Summary 2: a pointer records an address. Summary 3: dereferencing a pointer operates on the memory it points to.

Memory occupied by a pointer

```cpp
int main() {
    int a = 10;
    int * p;
    p = &a; // point p at a's address

    cout << *p << endl;              // dereference
    cout << sizeof(p) << endl;
    cout << sizeof(char *) << endl;
    cout << sizeof(float *) << endl;
    cout << sizeof(double *) << endl;

    system("pause");
    return 0;
}
```

Summary: on a 32-bit operating system every pointer type occupies 4 bytes.

Null pointers and wild pointers

A null pointer points to the memory at address 0. Use: initializing a pointer variable. Note: the memory a null pointer points to must not be accessed.

Example 1: null pointer

```cpp
int main() {
    // p points to the memory at address 0
    int * p = NULL;

    // accessing a null pointer crashes
    // addresses 0–255 are reserved by the system and may not be accessed
    cout << *p << endl;

    system("pause");
    return 0;
}
```

Example 2: wild pointer

```cpp
int main() {
    // p points to the memory at address 0x1100
    int * p = (int *)0x1100;

    // accessing a wild pointer crashes
    cout << *p << endl;

    system("pause");
    return 0;
}
```

Summary: neither null pointers nor wild pointers refer to memory we allocated, so never access them.

const and pointers — three cases:

- const modifying the pointee — pointer to const (常量指针)
- const modifying the pointer — const pointer (指针常量)
- const modifying both the pointer and the pointee

```cpp
int main() {
    int a = 10;
    int b = 10;

    // const modifies the pointee: the pointer can be repointed, the pointed-to value cannot change
    const int * p1 = &a;
    p1 = &b;    // ok
    //*p1 = 100; // error

    // const modifies the pointer: the pointer cannot be repointed, the pointed-to value can change
    int * const p2 = &a;
    //p2 = &b;  // error
    *p2 = 100;  // ok

    // const modifies both
    const int * const p3 = &a;
    //p3 = &b;   // error
    //*p3 = 100; // error

    system("pause");
    return 0;
}
```

Trick: look at what immediately follows the const — if it is the pointer type (const is to the left of *), you have a pointer to const; if it is the pointer name (const is to the right of *), you have a const pointer.

Pointers and arrays — use a pointer to access array elements.

```cpp
int main() {
    int arr[] = { 1,2,3,4,5,6,7,8,9,10 };
    int * p = arr; // pointer to the array

    cout << "first element:             " << arr[0] << endl;
    cout << "first element via pointer: " << *p << endl;

    for (int i = 0; i < 10; i++) {
        // traverse the array through the pointer
        cout << *p << endl;
        p++;
    }

    system("pause");
    return 0;
}
```

Pointers and functions — pass a pointer as a function parameter to modify the caller's arguments.

```cpp
// pass by value
void swap1(int a, int b)
{
    int temp = a;
    a = b;
    b = temp;
}

// pass by address
void swap2(int * p1, int * p2)
{
    int temp = *p1;
    *p1 = *p2;
    *p2 = temp;
}

int main() {
    int a = 10;
    int b = 20;

    swap1(a, b);   // pass by value does not change the arguments
    swap2(&a, &b); // pass by address does

    cout << "a = " << a << endl;
    cout << "b = " << b << endl;

    system("pause");
    return 0;
}
```

Summary: use pass by value when the arguments must not change, pass by address when they should.

Structs — definition and use

Syntax: `struct Name { member list };`

Three ways to create a variable from a struct:

- `struct Name var`
- `struct Name var = { value1, value2, ... }`
- define the variable together with the struct definition

```cpp
// struct definition
struct student
{
    // members
    string name;  // name
    int age;      // age
    int score;    // score
} stu3;           // creation method 3

int main() {
    // creation method 1
    struct student stu1; // the struct keyword may be omitted here
    stu1.name = "张三";
    stu1.age = 18;
    stu1.score = 100;
    cout << "name: " << stu1.name << " age: " << stu1.age << " score: " << stu1.score << endl;

    // creation method 2
    struct student stu2 = { "李四", 19, 60 };
    cout << "name: " << stu2.name << " age: " << stu2.age << " score: " << stu2.score << endl;

    stu3.name = "王五";
    stu3.age = 18;
    stu3.score = 80;
    cout << "name: " << stu3.name << " age: " << stu3.age << " score: " << stu3.score << endl;

    system("pause");
    return 0;
}
```

Summary 1: when defining a struct the keyword is struct and it cannot be omitted. Summary 2: when creating a struct variable the struct keyword can be omitted. Summary 3: struct members are accessed with the "." operator.

Struct pointers — access struct members through a pointer with the -> operator.

```cpp
// struct definition
struct student
{
    // members
    string name;  // name
    int age;      // age
    int score;    // score
};

int main() {
    struct student stu = { "张三", 18, 100 };
    struct student * p = &stu;

    p->score = 80; // a pointer accesses members through ->
    cout << "name: " << p->name << " age: " << p->age << " score: " << p->score << endl;

    system("pause");
    return 0;
}
```

Summary: a struct pointer accesses struct members with the -> operator.

Project setup: choose Create a new project → Empty Project.

Functions

- Without a forward declaration, calling the function from a main that is written before it is an error.
- With a declaration, main can be written before the function.
- A function can be declared many times, but implemented (defined) only once.

```cpp
#include <iostream>
using namespace std;

// tell the compiler in advance
// a function can be declared many times, but defined only once
int max(int num1, int num2);
int max(int num1, int num2);
int max(int num1, int num2);

int main()
{
    int sum = max(1, 2);
    cout << "result: " << sum << endl;
    system("pause");
    return 0;
}

int max(int num1, int num2) {
    int sum = num1 + num2;
    return sum;
}
```

Pointer to const (常量指针) — the pointer can be repointed; the pointed-to value cannot be changed:

```cpp
const int * p = &a;
*p = 20;  // error
p = &b;   // ok
```

Const pointer (指针常量) — the pointer cannot be repointed; the pointed-to value can be changed:

```cpp
int * const p = &a;
*p = 20;  // ok
p = &b;   // error
```

const modifying both the pointer and the value:

```cpp
const int * const p = &a;
*p = 20;  // error
p = &b;   // error
```

Example:

```cpp
#include <iostream>
using namespace std;

int main()
{
    int a = 10;
    int b = 20;

    int* p = &a;
    cout << "first test, a's address: " << p << " === " << *p << endl;
    int* p111 = &b;
    cout << "first test, b's address: " << p111 << " === " << *p111 << endl;

    // const modifies the pointee
    const int* p1 = &a;
    // *p1 = 20; // error
    cout << "second test, address: " << p1 << " === " << *p1 << endl;
    p1 = &b;
    cout << "second test, address: " << p1 << " === " << *p1 << endl;

    // const modifies the pointer
    int* const p2 = &a;
    cout << "third test, address: " << p2 << " === " << *p2 << endl;
    *p2 = 30;
    // p2 = &b; // error
    cout << "third test, address: " << p2 << " === " << *p2 << endl;

    // const modifies both the pointer and the value
    const int* const p3 = &a;

    system("pause");
    return 0;
}
```
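Building on the pointer/array notes above, here is a small extra sketch (not from the original notes) showing why the element count has to be passed separately when an array is handed to a function: the array argument decays to a pointer. The function name printArray is just an illustrative choice.

```cpp
#include <iostream>
using namespace std;

// the array parameter is really an int*, so sizeof(arr) inside the
// function would give the pointer size, not the array size
void printArray(int * arr, int len)
{
    for (int i = 0; i < len; i++) {
        cout << arr[i] << " ";
    }
    cout << endl;
}

int main() {
    int arr[] = { 1, 2, 3, 4, 5 };
    // the element count must be computed where the real array is in scope
    printArray(arr, sizeof(arr) / sizeof(arr[0]));
    return 0;
}
```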
ElasticSearch
Manual install, 7.x

Manual single-node install of Elasticsearch 7.x

```bash
# download the tarball (6.6.2)
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.6.2.tar.gz

# after extracting, edit the config file
vi /elasticsearch-7.10.1/config/elasticsearch.yml
network.host: 0.0.0.0   # allow access from outside the host

# append to the system settings; reload for it to take effect
vi /etc/sysctl.conf
vm.max_map_count=262144

# load the parameters
anthony@base:/usr/local/elasticsearch-6.6.2$ sudo sysctl -p
vm.max_map_count = 262144

# raise the limit on open file descriptors; add to
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096    # or 65536
* hard nproc 4096    # or 65536

# adjust the JVM heap; the two values must match
vi config/jvm.options
-Xms4g
-Xmx4g

# start
bin/elasticsearch
bin/elasticsearch -d   # run in the background
```

Installing analyzers — download and extract them into elasticsearch's plugins directory.

IK analyzer: https://github.com/medcl/elasticsearch-analysis-ik/releases/tag/v6.6.2

```
POST /_analyze
{
  "analyzer": "ik_smart",
  "text": "程序员"
}
```

Pinyin analyzer: https://github.com/medcl/elasticsearch-analysis-pinyin/releases/tag/v6.6.2

```
GET /medcl/_analyze
{
  "text": ["程序员"],
  "analyzer": "pinyin_analyzer"
}
```

A common startup error, caused by wrong file ownership:

```
# chown -R user:user directory
Exception in thread "main" java.nio.file.AccessDeniedException: /root/home/searchengine/elasticsearch-6.2.4/config/jvm.options
        at sun.nio.fs.UnixException.translateToIOException(UnixException.java:84)
        at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:102)
        at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:107)
        at sun.nio.fs.UnixFileSystemProvider.newByteChannel(UnixFileSystemProvider.java:214)
        at java.nio.file.Files.newByteChannel(Files.java:361)
        at java.nio.file.Files.newByteChannel(Files.java:407)
        at java.nio.file.spi.FileSystemProvider.newInputStream(FileSystemProvider.java:384)
        at java.nio.file.Files.newInputStream(Files.java:152)
        at org.elasticsearch.tools.launchers.JvmOptionsParser.main(JvmOptionsParser.java:58)
```

Manual cluster install of Elasticsearch 7.x

1. Edit elasticsearch.yml on one node:

```yaml
cluster.name: kkb-es
node.name: node0
node.master: true
network.host: 0.0.0.0
http.port: 9200
transport.tcp.port: 9300
discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300","127.0.0.1:9301","127.0.0.1:9302"]
discovery.zen.minimum_master_nodes: 2
http.cors.enabled: true
http.cors.allow-origin: "*"
```

2. Copy it to the other servers and change these three settings in the config above:

```yaml
node.name: node0
http.port: 9200
transport.tcp.port: 9300
```

3. Check cluster health:

```
http://localhost:9200/_cat/health?v
```

Manual single-node Kibana install

```bash
# download and extract
https://artifacts.elastic.co/downloads/kibana/kibana-6.6.2-linux-x86_64.tar.gz

# edit the config file
vi kibana-6.4.2-linux-x86_64/config/kibana.yml
server.port: 5600
server.host: "0.0.0.0"
elasticsearch.url: "http://localhost:9200"

# start in the background
./bin/kibana &
```

Manual single-node Logstash install

mysql-connector-java-5.1.48.jar is placed in logstash-6.6.2/logstash-core/lib/jars so that no relative path is needed.

```
input {
  jdbc {
    jdbc_driver_library => "mysql-connector-java-5.1.48.jar"
    jdbc_driver_class => "com.mysql.jdbc.Driver"
    jdbc_connection_string => "jdbc:mysql://192.168.111.128:3306/ssm"
    jdbc_user => "root"
    jdbc_password => "123456"
    jdbc_paging_enabled => "true"
    jdbc_page_size => "2"
    statement => "SELECT id,'name',phone FROM phone where update_data >= :sql_last_value"
    tracking_column_type => "timestamp"
    tracking_column => "update_date"
    schedule => "* * * * *"
  }
}
output {
  elasticsearch {
    index => "phone"
    document_id => "%{id}"
    hosts => ["http://192.168.111.128:9200"]
  }
}
```

Start command:

```bash
./logstash -f ../config/logstash-sample.conf
```

Manual install, 8.x

📌 8.0 already ships with its own JDK.

Tarball install of ES

```bash
# download the package and extract it
wget https://xxxx.tar
# start
./bin/elasticsearch
```

Startup output

```
✅ Elasticsearch security features have been automatically configured!
✅ Authentication is enabled and cluster connections are encrypted.

ℹ️  Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`):
  Shb2+OuCKthxFTxD-*jE

ℹ️  HTTP CA certificate SHA-256 fingerprint:
  ceb6add33653a4c8f3493bddde839cdfcf1035a3a50cb01247962495f15ea40f

ℹ️  Configure Kibana to use this cluster:
• Run Kibana and click the configuration link in the terminal when Kibana starts.
• Copy the following enrollment token and paste it into Kibana in your browser (valid for the next 30 minutes):
  eyJ2ZXIiOiI4LjAuMCIsImFkciI6WyIxMC4wLjIuNDo5MjAwIl0sImZnciI6ImNlYjZhZGQzMzY1M2E0YzhmMzQ5M2JkZGRlODM5Y2RmY2YxMDM1YTNhNTBjYjAxMjQ3OTYyNDk1ZjE1ZWE0MGYiLCJrZXkiOiJXaEV4SDM4QlZXV242cU9KejgxNTprUVliUEY0dlJfaU56MnRFaDlRY0hnIn0=

ℹ️  Configure other nodes to join this cluster:
• On this node:
  ⁃ Create an enrollment token with `bin/elasticsearch-create-enrollment-token -s node`.
  ⁃ Uncomment the transport.host setting at the end of config/elasticsearch.yml.
  ⁃ Restart Elasticsearch.
• On other nodes:
  ⁃ Start Elasticsearch with `bin/elasticsearch --enrollment-token <token>`, using the enrollment token that you generated.
```

After stopping the service and editing the config, `curl 127.0.0.1:9200` returns:

```
[hy@localhost ~]$ curl 127.0.0.1:9200
curl: (52) Empty reply from server
```

while the ES side logs:

```
# clearly different from the old 7.1.1 behaviour
[2022-02-19T05:02:02,812][WARN ][o.e.x.s.t.n.SecurityNetty4HttpServerTransport] [localhost.localdomain] received plaintext http traffic on an https channel, closing connection Netty4HttpChannel{localAddress=/127.0.0.1:9200, remoteAddress=/127.0.0.1:38518}
[2022-02-19T05:02:27,144][WARN ][o.e.x.s.t.n.SecurityNetty4HttpServerTransport] [localhost.localdomain] received plaintext http traffic on an https channel, closing connection Netty4HttpChannel{localAddress=/[0:0:0:0:0:0:0:1]:9200, remoteAddress=/[0:0:0:0:0:0:0:1]:56174}
```

Edit the config file:

```yaml
# the security features have to be turned off, otherwise plain HTTP is no longer served
# change true to false
xpack.security.http.ssl:
  enabled: false
  keystore.path: certs/http.p12

# Enable encryption and mutual authentication between cluster nodes
xpack.security.transport.ssl:
  enabled: false
```

Test access

```
http://localhost:9200
# credentials required: elastic / Shb2+OuCKthxFTxD-*jE
```

Java client test

```xml
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-client</artifactId>
    <version>8.0.0</version>
</dependency>
```

```java
@Bean(destroyMethod = "close")
public RestClient restClient() throws CertificateException, IOException, KeyStoreException,
        NoSuchAlgorithmException, KeyManagementException {
    final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
    credentialsProvider.setCredentials(AuthScope.ANY,
            new UsernamePasswordCredentials("elastic", "VWFjE60dbpKIjbajGJQ4"));

    Path caCertificatePath = Paths.get("E:\\es\\elasticsearch-8.0.0-windows-x86_64\\elasticsearch-8.0.0\\config\\certs\\http_ca.crt");
    CertificateFactory factory = CertificateFactory.getInstance("X.509");
    Certificate trustedCa;
    try (InputStream is = Files.newInputStream(caCertificatePath)) {
        trustedCa = factory.generateCertificate(is);
    }
    KeyStore trustStore = KeyStore.getInstance("pkcs12");
    trustStore.load(null, null);
    trustStore.setCertificateEntry("ca", trustedCa);

    SSLContextBuilder sslContextBuilder = SSLContexts.custom()
            .loadTrustMaterial(trustStore, null);
    final SSLContext sslContext = sslContextBuilder.build();

    RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "https"))
            .setHttpClientConfigCallback(httpClientBuilder -> {
                httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
                return httpClientBuilder.setSSLContext(sslContext);
            });
    return builder.build();
}
```

```java
// inject the client
@Autowired
RestClient restClient;

Request request = new Request("GET", "/ess_book/_doc/1");
Response response = restClient.performRequest(request);
return EntityUtils.toString(response.getEntity());
```

Cluster install of ES: 全栈开发之ElasticSearch8.0分布式搜索引擎集群及其高可用测试 - 掘金

Installing Kibana — a common error:

max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]

```bash
vim /etc/sysctl.conf
vm.max_map_count=262144

# apply
sysctl -p
```

Installing plugins — HEAD

Head is a cluster management tool for elasticsearch that can be used to browse and query data.

1. elasticsearch-head is open source and hosted on GitHub, so git must be installed first in order to fetch it.
2. Running elasticsearch-head uses grunt, which needs the npm package manager, so nodejs must be installed.
3. Since elasticsearch 5.0, elasticsearch-head is no longer shipped as a plugin inside the plugins directory. Clone it locally with git:

```bash
cd /usr/local/
git clone git://github.com/mobz/elasticsearch-head.git
```

4. Install the elasticsearch-head dependencies:

```bash
[root@localhost local]# npm install -g grunt-cli
[root@localhost _site]# cd /usr/local/elasticsearch-head/
[root@localhost elasticsearch-head]# cnpm install
```

5. Edit Gruntfile.js:

```bash
[root@localhost _site]# cd /usr/local/elasticsearch-head/
[root@localhost elasticsearch-head]# vi Gruntfile.js
```

Under connect → server → options add `hostname: '*'` so that all IPs may access it.

6. Change elasticsearch-head's default connection address:

```bash
[root@localhost elasticsearch-head]# cd /usr/local/elasticsearch-head/_site/
[root@localhost _site]# vi app.js
```

Point `this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "…"` at the address of your elasticsearch node.
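As a follow-up to the IK analyzer install above, the usual next step is to declare the analyzer in an index mapping and query through it. This is a sketch in the same Dev Tools style as the _analyze calls above; the index name news is just an example, and on 6.x the mapping would additionally need a document type name.

```
PUT /news
{
  "mappings": {
    "properties": {
      "content": {
        "type": "text",
        "analyzer": "ik_max_word",
        "search_analyzer": "ik_smart"
      }
    }
  }
}

POST /news/_doc/1
{ "content": "程序员正在写代码" }

GET /news/_search
{
  "query": { "match": { "content": "程序员" } }
}
```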
Flink
Trying out Flink

Local installation

Step 1: download. The only requirement for running Flink is a working Java 8 or 11 installation, which you can check with:

```bash
# a Java runtime is required
java -version
# download and extract flink
tar -xzf flink-1.11.2-bin-scala_2.11.tgz
cd flink-1.11.2-bin-scala_2.11
```

Step 2: start a local cluster

```bash
$ ./bin/start-cluster.sh
Starting cluster.
Starting standalonesession daemon on host.
Starting taskexecutor daemon on host.
```

Step 3: submit a job

```bash
$ ./bin/flink run examples/streaming/WordCount.jar
$ tail log/flink-*-taskexecutor-*.out
  (to,1)
  (be,1)
  (or,1)
  (not,1)
  (to,2)
  (be,2)
```

Step 4: stop the cluster

```bash
$ ./bin/stop-cluster.sh
```

Fraud detection with the DataStream API

Java environment

```xml
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>frauddetection</groupId>
  <artifactId>frauddetection</artifactId>
  <version>0.1</version>
  <packaging>jar</packaging>
  <name>Flink Walkthrough DataStream Java</name>
  <url>https://flink.apache.org</url>
  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <flink.version>1.10.2</flink.version>
    <java.version>1.8</java.version>
    <scala.binary.version>2.11</scala.binary.version>
    <maven.compiler.source>${java.version}</maven.compiler.source>
    <maven.compiler.target>${java.version}</maven.compiler.target>
  </properties>
  <dependencies>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-walkthrough-common_${scala.binary.version}</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <!-- This dependency is provided, because it should not be packaged into the JAR file. -->
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
      <version>${flink.version}</version>
      <!-- <scope>provided</scope> -->
    </dependency>
    <dependency>
      <groupId>org.projectlombok</groupId>
      <artifactId>lombok</artifactId>
      <version>1.18.0</version>
      <scope>provided</scope>
    </dependency>
    <!-- Add connector dependencies here. They must be in the default scope (compile). -->
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-connector-kafka-0.10_${scala.binary.version}</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <!-- Add logging framework, to produce console output when running in the IDE. -->
    <!-- These dependencies are excluded from the application JAR by default. -->
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
      <version>1.7.7</version>
      <scope>runtime</scope>
    </dependency>
    <dependency>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
      <version>1.2.17</version>
      <scope>runtime</scope>
    </dependency>
  </dependencies>
  <build>
    <plugins>
      <!-- Java Compiler -->
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.1</version>
        <configuration>
          <source>${java.version}</source>
          <target>${java.version}</target>
        </configuration>
      </plugin>
      <!-- We use the maven-shade plugin to create a fat jar that contains all necessary dependencies. -->
      <!-- Change the value of <mainClass>...</mainClass> if your program entry point changes. -->
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-shade-plugin</artifactId>
        <version>3.0.0</version>
        <executions>
          <!-- Run shade goal on package phase -->
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
            <configuration>
              <artifactSet>
                <excludes>
                  <exclude>org.apache.flink:force-shading</exclude>
                  <exclude>com.google.code.findbugs:jsr305</exclude>
                  <exclude>org.slf4j:*</exclude>
                  <exclude>log4j:*</exclude>
                </excludes>
              </artifactSet>
              <filters>
                <filter>
                  <!-- Do not copy the signatures in the META-INF folder.
                       Otherwise, this might cause SecurityExceptions when using the JAR. -->
                  <artifact>*:*</artifact>
                  <excludes>
                    <exclude>META-INF/*.SF</exclude>
                    <exclude>META-INF/*.DSA</exclude>
                    <exclude>META-INF/*.RSA</exclude>
                  </excludes>
                </filter>
              </filters>
              <transformers>
                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                  <mainClass>spendreport.FraudDetectionJob</mainClass>
                </transformer>
              </transformers>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
    <pluginManagement>
      <plugins>
        <!-- This improves the out-of-the-box experience in Eclipse by resolving some warnings. -->
        <plugin>
          <groupId>org.eclipse.m2e</groupId>
          <artifactId>lifecycle-mapping</artifactId>
          <version>1.0.0</version>
          <configuration>
            <lifecycleMappingMetadata>
              <pluginExecutions>
                <pluginExecution>
                  <pluginExecutionFilter>
                    <groupId>org.apache.maven.plugins</groupId>
                    <artifactId>maven-shade-plugin</artifactId>
                    <versionRange>[3.0.0,)</versionRange>
                    <goals>
                      <goal>shade</goal>
                    </goals>
                  </pluginExecutionFilter>
                  <action>
                    <ignore/>
                  </action>
                </pluginExecution>
                <pluginExecution>
                  <pluginExecutionFilter>
                    <groupId>org.apache.maven.plugins</groupId>
                    <artifactId>maven-compiler-plugin</artifactId>
                    <versionRange>[3.1,)</versionRange>
                    <goals>
                      <goal>testCompile</goal>
                      <goal>compile</goal>
                    </goals>
                  </pluginExecutionFilter>
                  <action>
                    <ignore/>
                  </action>
                </pluginExecution>
              </pluginExecutions>
            </lifecycleMappingMetadata>
          </configuration>
        </plugin>
      </plugins>
    </pluginManagement>
  </build>
</project>
```

Sources (输入)

Reading from a file

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<String> stringDataStreamSource = env.readTextFile("Sensor.txt");
    stringDataStreamSource.print("data");
    env.execute();
}
```

Kafka — requires the connector dependency:

```xml
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka-0.10_${scala.binary.version}</artifactId>
    <version>${flink.version}</version>
</dependency>
```

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // kafka configuration (brokers, credentials, ...)
    final Properties properties = new Properties();
    final DataStreamSource<String> sensor = env.addSource(
            new FlinkKafkaConsumer010<String>("sensor", new SimpleStringSchema(), properties));
    sensor.print("data");
    env.execute();
}
```

From a collection

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<SensorReading> dataStreamSource = env.fromCollection(Arrays.asList(
            new SensorReading("sen1", 1L, 37.5),
            new SensorReading("sen2", 2L, 38.5),
            new SensorReading("sen3", 3L, 39.5),
            new SensorReading("sen4", 4L, 40.5)));
    final DataStreamSource<Integer> integerDataStreamSource = env.fromElements(11, 12, 13, 14, 15);
    dataStreamSource.print("data");
    integerDataStreamSource.print("my list");
    env.execute();
}
```

Sinks and custom sources (输出)

Custom source — simulating data pulled from Kafka:

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // kafka configuration
    final Properties properties = new Properties();
    // register the custom source
    final DataStreamSource sensor = env.addSource(new MySensorce());
    sensor.print("data");
    env.execute();
}
```

The custom source:

```java
public class MySensorce implements SourceFunction<SensorReading> {

    private boolean running = true;

    @Override
    public void run(SourceContext<SensorReading> sourceContext) throws Exception {
        final Random random = new Random();
        final HashMap<String, Double> stringDoubleHashMap = new HashMap<>();
        for (int i = 1; i < 11; i++) {
            stringDoubleHashMap.put(i + "", random.nextGaussian());
        }
        while (running) {
            for (String id : stringDoubleHashMap.keySet()) {
                final double v = stringDoubleHashMap.get(id) + random.nextGaussian();
                final SensorReading sensorReading = new SensorReading(id, System.currentTimeMillis(), v);
                sourceContext.collect(sensorReading);
            }
            Thread.sleep(1000);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}
```

Operators

Basic operators

map

```java
final SingleOutputStreamOperator<Integer> map = inputStream.map(new MapFunction<String, Integer>() {
    @Override
    public Integer map(String s) throws Exception {
        return s.length();
    }
});
```

flatMap

```java
final SingleOutputStreamOperator<String> stringSingleOutputStreamOperator = inputStream.flatMap(new FlatMapFunction<String, String>() {
    @Override
    public void flatMap(String s, Collector<String> collector) throws Exception {
        Arrays.stream(s.split(",")).forEach(collector::collect);
    }
});
```

filter

```java
final SingleOutputStreamOperator<String> filter = inputStream.filter(new FilterFunction<String>() {
    @Override
    public boolean filter(String s) throws Exception {
        return s.startsWith("sen");
    }
});
```

Splitting and merging streams

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<String> inputStream = env.readTextFile("Sensor.txt");
    final DataStream<SensorReading> map = inputStream.map(line -> {
        final String[] split = line.split(",");
        return new SensorReading(split[0], Long.parseLong(split[1]), Double.parseDouble(split[2]));
    });

    // split the stream
    final SplitStream<SensorReading> split = map.split(new OutputSelector<SensorReading>() {
        @Override
        public Iterable<String> select(SensorReading value) {
            return value.getTemp() > 30 ? Collections.singletonList("hight") : Collections.singletonList("low");
        }
    });
    final DataStream<SensorReading> hight = split.select("hight");
    final DataStream<SensorReading> low = split.select("low");
    final DataStream<SensorReading> all = split.select("low", "hight");
    hight.print("hight");
    low.print("low");
    all.print("all");

    final SingleOutputStreamOperator<Tuple2<String, Double>> warningStream = hight.map(new MapFunction<SensorReading, Tuple2<String, Double>>() {
        @Override
        public Tuple2<String, Double> map(SensorReading sensorReading) throws Exception {
            return new Tuple2<>(sensorReading.getId(), sensorReading.getTemp());
        }
    });
    final ConnectedStreams<Tuple2<String, Double>, SensorReading> connectedStreams = warningStream.connect(low);

    // merge the two streams
    final SingleOutputStreamOperator<Object> result = connectedStreams.map(new CoMapFunction<Tuple2<String, Double>, SensorReading, Object>() {
        @Override
        public Object map1(Tuple2<String, Double> value) throws Exception {
            return new Tuple3<>(value.f0, value.f1, "高温报警");
        }

        @Override
        public Object map2(SensorReading value) throws Exception {
            return new Tuple2<>(value.getId(), "正常");
        }
    });

    // second approach: union
    final DataStream<SensorReading> union = hight.union(low);

    result.print();
    union.print("union");
    env.execute();
}
```

Rich functions

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<String> inputStream = env.readTextFile("Sensor.txt");
    final DataStream<SensorReading> map = inputStream.map(line -> {
        final String[] split = line.split(",");
        return new SensorReading(split[0], Long.parseLong(split[1]), Double.parseDouble(split[2]));
    });
    DataStream<Tuple2<String, Integer>> resultStream = map.map(new RichMyMapper());
    resultStream.print();
    env.execute();
}

public static class MyMapper implements MapFunction<SensorReading, Tuple2<String, Integer>> {
    @Override
    public Tuple2<String, Integer> map(SensorReading sensorReading) throws Exception {
        return new Tuple2<>(sensorReading.getId(), sensorReading.getId().length());
    }
}

// custom rich function
public static class RichMyMapper extends RichMapFunction<SensorReading, Tuple2<String, Integer>> {
    @Override
    public Tuple2<String, Integer> map(SensorReading value) throws Exception {
        // getRuntimeContext().getState();
        return new Tuple2<>(value.getId(), getRuntimeContext().getIndexOfThisSubtask());
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        // e.g. open a database connection here
        super.open(parameters);
    }

    @Override
    public void close() throws Exception {
        // close the database connection
        super.close();
    }
}
```

Grouping and aggregation

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<String> inputStream = env.readTextFile("Sensor.txt");
    final DataStream<SensorReading> map = inputStream.map(line -> {
        final String[] split = line.split(",");
        return new SensorReading(split[0], Long.parseLong(split[1]), Double.parseDouble(split[2]));
    });

    // group by key
    // final KeyedStream<SensorReading, Tuple> id = map.keyBy("id");
    final KeyedStream<SensorReading, String> keyedStream = map.keyBy(SensorReading::getId);

    // reduce
    final SingleOutputStreamOperator<SensorReading> reduce = keyedStream.reduce(new ReduceFunction<SensorReading>() {
        @Override
        public SensorReading reduce(SensorReading valu1, SensorReading valu2) throws Exception {
            return new SensorReading(valu1.getId(), valu2.getTimestamp(), Math.max(valu1.getTemp(), valu2.getTemp()));
        }
    });

    reduce.print();
    env.execute();
}
```
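The notes above only show sources under the 输出 heading; here is a hedged sink sketch (not from the original notes) that writes the stream back to Kafka with the same 0.10 connector already on the classpath. The topic name "sensor-out" and the broker address are assumptions, and imports are omitted like in the other snippets.

```java
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final DataStream<String> inputStream = env.readTextFile("Sensor.txt");

    final Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092"); // assumed broker address

    // write every line out to a Kafka topic (topic name is an assumption)
    inputStream.addSink(new FlinkKafkaProducer010<String>("sensor-out", new SimpleStringSchema(), properties));

    env.execute();
}
```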
Mycat
Master-slave replication

Startup: 1. download the JRE package; 2. download the Mycat package; 3. build the image.

Dockerfile

```dockerfile
FROM centos
# extract into /home
ADD server-jre-8u251-linux-x64.tar.gz /home/
ADD ./Mycat-server-1.6.7.4-release-20200105164103-linux.tar.gz /home
ENV WORKPATH /home/mycat/
WORKDIR $WORKPATH
ENV JAVA_HOME /home/jdk1.8.0_251
ENV CLASSPATH $JAVA_HOME/dt.jar:$JAVA_HOME/lib/tools.jar
ENV PATH $PATH:$JAVA_HOME/BIN:$CATALINA_HOME/lib:$CATALINA_HOME/bin
# expose port 8066
EXPOSE 8066
CMD /home/mycat/bin/mycat console
```

Copy mycat's conf and logs directories out of the container, then mount both directories and start:

```bash
docker run -it \
  --name mycat \
  -p 8066:8066 \
  -v /home/anthony/mycat/conf:/home/mycat/conf/ \
  -v /home/anthony/mycat/logs:/home/mycat/logs/ \
  java1234/mycat:1.0

Running Mycat-server...
Removed stale pid file: /home/mycat/logs/mycat.pid
wrapper | --> Wrapper Started as Console
wrapper | Launching a JVM...
jvm 1 | Wrapper (Version 3.2.3) http://wrapper.tanukisoftware.org
jvm 1 | Copyright 1999-2006 Tanuki Software, Inc. All Rights Reserved.
jvm 1 |
jvm 1 | MyCAT Server startup successfully. see logs in logs/mycat.log
```

Configuration files:

- schema.xml: defines logical schemas, tables, shard nodes, etc.
- rule.xml: defines sharding rules
- server.xml: defines users and system variables

Install MySQL 5.7

```bash
# copy these files out of the container
docker cp xxx:/etc/mysql/mysql.conf.d/ /home/anthony/mysql
docker cp xxx:/var/log/ /home/anthony/mysql
```

Mount them when starting the container.

Create a network

```bash
# inspect a container's virtual ip
docker inspect ac7
# list networks
docker network ls
# create a custom network
# extnetwork is the network name
# 172.20.0.1 is the gateway
docker network create --subnet=172.20.0.0/16 extnetwork
```

Recreate the mysql and mycat containers on that network with fixed IPs

```bash
# pick the network and ip with: --net extnetwork --ip 172.20.0.2
docker run -itd \
  --name mysql-master \
  -p 3306:3306 \
  -e MYSQL_ROOT_PASSWORD=123456 \
  -v /home/anthony/mysql/mysql.conf.d/:/etc/mysql/conf.d/ \
  -v /home/anthony/mysql/log/:/var/log/ \
  --net extnetwork --ip 172.20.0.2 \
  mysql:5.7

docker run -itd \
  --name mysql-slave \
  -p 3307:3306 \
  -e MYSQL_ROOT_PASSWORD=123456 \
  -v /home/anthony/mysql2/mysql.conf.d/:/etc/mysql/conf.d/ \
  -v /home/anthony/mysql2/log/:/var/log/ \
  --net extnetwork --ip 172.20.0.3 \
  mysql:5.7

docker run -it \
  --name mycat \
  -p 8066:8066 \
  -v /home/anthony/mycat/conf:/home/mycat/conf/ \
  -v /home/anthony/mycat/logs:/home/mycat/logs/ \
  --net extnetwork --ip 172.20.0.4 \
  java1234/mycat:1.0
```

Master-slave configuration

Master, under [mysqld]:

```ini
# server ID of the master, must be unique
server-id=2
# enable the binary log and set its name
log_bin=mysql-bin
# database to replicate
binlog-do-db=db_java1234
# databases that do not need replication
binlog-ignore_db=mysql
binlog-ignore_db=information_schema
binlog-ignore_db=performation_schema
binlog_ignore_db=sys
# binlog format; mysql defaults to statement, mixed is recommended
binlog_format=MIXED
```

Slave, under [mysqld]:

```ini
# server ID, must be unique
server-id=3
# this line is only needed on the slave
relay-log=mysql-relay
# enable the binary log and set its name
log_bin=mysql-bin
# database to replicate
binlog-do-db=db_java1234
# databases that do not need replication
binlog-ignore_db=mysql
binlog-ignore_db=information_schema
binlog-ignore_db=performation_schema
binlog_ignore_db=sys
# binlog format; mysql defaults to statement, mixed is recommended
binlog_format=MIXED
```

Check — replication is not active yet:

```
mysql> show master status;
+------------------+----------+--------------+--------------------------------------------------+-------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB                                 | Executed_Gtid_Set |
+------------------+----------+--------------+--------------------------------------------------+-------------------+
| mysql-bin.000003 |      357 | db_java1234  | mysql,information_schema,performation_schema,sys |                   |
+------------------+----------+--------------+--------------------------------------------------+-------------------+
1 row in set (0.01 sec)

mysql> show slave status;
Empty set
```

Create the replication user — run on the master:

```sql
-- create the user slave1
-- 172.20.0.3 is the slave's ip
-- 123456 is the password
CREATE USER 'slave1'@'172.20.0.3' IDENTIFIED BY '123456';

-- syntax: GRANT privileges ON databasename.tablename TO 'username'@'host'
-- 172.20.0.3 is the slave
-- the REPLICATION SLAVE privilege
-- *.* means all databases and tables
GRANT REPLICATION SLAVE ON *.* TO 'slave1'@'172.20.0.3';

-- reload privileges
FLUSH PRIVILEGES;
```

Run on the slave:

```sql
CHANGE MASTER TO MASTER_HOST='172.20.0.2',MASTER_USER='slave1',MASTER_PASSWORD='123456',MASTER_LOG_FILE='mysql-bin.000003',MASTER_LOG_POS=964;
-- start replication
start slave;
```

Then on the slave run:

```sql
show slave status;
```

Make sure Slave_IO_Running = Yes and Slave_SQL_Running = Yes.

Read/write splitting — schema.xml

schema tag

```xml
<schema name="TESTDB" checkSQLschema="true" sqlMaxLimit="100" randomDataNode="dn1">
  <!-- auto sharding by id (long) -->
  <!-- splitTableNames lets the table name attribute list several tables separated by commas, all sharing this config -->
  <table name="travelrecord,address" dataNode="dn1,dn2,dn3" rule="auto-sharding-long" splitTableNames="true"/>
  <!-- <table name="oc_call" primaryKey="ID" dataNode="dn1$0-743" rule="latest-month-calldate" /> -->
</schema>
```

- name: the logical schema name
- checkSQLschema: when true, mycat rewrites `select * from T_Account.user` to `select * from user`; when false such a query errors
- sqlMaxLimit: default maximum rows per page; if the SQL has its own limit, the SQL wins, otherwise this value is used
- randomDataNode: not needed for now; change this attribute to dataNode="dn1"

dataNode tag

```xml
<dataNode name="dn1" dataHost="localhost1" database="db1" />
<dataNode name="dn2" dataHost="localhost1" database="db2" />
<dataNode name="dn3" dataHost="localhost1" database="db3" />
```

- name: shard node name
- dataHost: which database host the shard node belongs to
- database: which physical database to use, e.g. db_java1234

dataHost tag

```xml
<dataHost name="localhost1" maxCon="1000" minCon="10" balance="3"
          writeType="0" dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
  <heartbeat>select user()</heartbeat>
  <!-- can have multi write hosts -->
  <writeHost host="hostM1" url="localhost:3306" user="root" password="123456">
  </writeHost>
  <!-- <writeHost host="hostM2" url="localhost:3316" user="root" password="123456"/> -->
</dataHost>
```

- name: matches the dataHost attribute of the dataNode tag
- maxCon: maximum connections per read/write instance pool; both writeHost and readHost use this value to size their connection pools
- minCon: minimum connections
- balance:
  - 0: read/write splitting disabled; all reads go to the currently available writeHost
  - 1, 2: all reads are distributed randomly across the writeHosts and readHosts
  - 3: all read requests are distributed randomly across the readHosts

Dual master, dual slave

Second master/slave pair:

1. Add to the mysql config:

```ini
log-slave-updates=1
auto-increment-increment=2
auto-increment-offset=1
```

2. Copy the mysql directory to mysql3.
3. In mysql3's config change these settings:

```ini
# server ID, must be unique
server-id=5
auto-increment-offset=2
```

4. Create two more containers, m2 and s2. The two masters replicate from each other, i.e. each is the other's slave.

Testing read/write splitting — insert statement:

```sql
insert into test values(@@hostname);
```
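A hedged sketch of how that test can be driven through Mycat itself; the login user and password depend on what is defined in server.xml, so root/123456 here are assumptions.

```bash
# connect to Mycat's SQL port (8066) against the logical schema TESTDB
mysql -h127.0.0.1 -P8066 -uroot -p123456 TESTDB
```

```sql
-- the inserted row carries the write host's hostname;
-- with read/write splitting enabled, repeated SELECTs should come back from the read hosts
insert into test values(@@hostname);
select * from test;
```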
Spring Security
Spring Security

```xml
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-security</artifactId>
</dependency>
```

Login service

```java
@Service
public class LoginService {

    public static Map<String, RedisUser> map = new HashMap<>();

    @Resource
    AuthenticationManager authenticationManager;

    public String login(SysUser sysUser) {
        // authenticate the user through the AuthenticationManager
        UsernamePasswordAuthenticationToken usernamePasswordAuthenticationToken =
                new UsernamePasswordAuthenticationToken(sysUser.getUsername(), sysUser.getPassword());
        Authentication authenticate = authenticationManager.authenticate(usernamePasswordAuthenticationToken);
        // if authentication fails, report it
        if (Objects.isNull(authenticate)) {
            throw new RuntimeException("login failed");
        }
        // if authentication succeeds, generate a JWT from the username
        RedisUser principal = (RedisUser) authenticate.getPrincipal();
        map.put(principal.getUsername(), principal);
        return createJWT("1234567", 9990000, principal.getUsername());
    }

    public void logout() {
        UsernamePasswordAuthenticationToken authentication =
                (UsernamePasswordAuthenticationToken) SecurityContextHolder.getContext().getAuthentication();
        RedisUser principal = (RedisUser) authentication.getPrincipal();
        String username = principal.getUsername();
        // remove it from redis (here: the in-memory map)
        map.remove(username);
    }
}
```

```java
@RestController
@RequestMapping("/")
public class LoginController {

    @Resource
    LoginService loginService;

    @PostMapping("/user/login")
    public ResponseEntity<HashMap> login(@RequestParam("username") String username,
                                         @RequestParam("password") String password) {
        SysUser sysUser = new SysUser();
        sysUser.setUsername(username);
        sysUser.setPassword(password);
        HashMap<String, Object> stringObjectHashMap = new HashMap<>();
        stringObjectHashMap.put("token", loginService.login(sysUser));
        return ResponseEntity.status(200).body(stringObjectHashMap);
    }

    @PostMapping("/user/logout")
    public ResponseEntity<String> logout() {
        loginService.logout();
        return ResponseEntity.ok("success");
    }
}
```

Verifying the account

```java
import com.example.spirngsecutirylearn.pojo.RedisUser;
import com.example.spirngsecutirylearn.pojo.SysUser;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.stereotype.Service;

import java.util.ArrayList;

@Service
public class TestService implements UserDetailsService {

    static String user = "anthony";
    static String password = "$2a$10$yyR9WuT9JY/bpe1VPU0yguqlv0lWpgzTD9NEetf2.n8y7NXIa1rfm";

    /**
     * DaoAuthenticationProvider calls this method to look up the user and return a UserDetails object
     */
    @Override
    public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
        // look the user up in the database
        SysUser sysUser = new SysUser();
        if (username.equals(user)) {
            sysUser.setUsername(user);
            sysUser.setPassword(password);
        } else {
            // user not found
            throw new RuntimeException("user not found");
        }
        // look up the user's authorities
        ArrayList<String> strings = new ArrayList<>();
        // strings.add("test");
        strings.add("admin");
        // return the wrapped information
        return new RedisUser(sysUser, strings);
    }
}
```

The cached user object

```java
package com.example.spirngsecutirylearn.pojo;

import lombok.AllArgsConstructor;
import lombok.Data;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.authority.SimpleGrantedAuthority;
import org.springframework.security.core.userdetails.UserDetails;

import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

@Data
public class RedisUser implements UserDetails {

    SysUser sysUser;
    List<String> list;

    public RedisUser(SysUser sysUser, List<String> list) {
        this.sysUser = sysUser;
        this.list = list;
    }

    /**
     * authorities
     */
    @Override
    public Collection<? extends GrantedAuthority> getAuthorities() {
        return list.stream().map(SimpleGrantedAuthority::new).collect(Collectors.toList());
    }

    @Override
    public String getPassword() {
        return sysUser.getPassword();
    }

    @Override
    public String getUsername() {
        return sysUser.getUsername();
    }

    @Override
    public boolean isAccountNonExpired() {
        return true;
    }

    @Override
    public boolean isAccountNonLocked() {
        return true;
    }

    @Override
    public boolean isCredentialsNonExpired() {
        return true;
    }

    @Override
    public boolean isEnabled() {
        return true;
    }
}
```

Authentication-failure callback

```java
package com.example.spirngsecutirylearn.handler;

import cn.hutool.json.JSONUtil;
import org.springframework.http.ResponseEntity;
import org.springframework.security.core.AuthenticationException;
import org.springframework.security.web.AuthenticationEntryPoint;
import org.springframework.stereotype.Component;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;

@Component
public class AuthException implements AuthenticationEntryPoint {

    @Override
    public void commence(HttpServletRequest request, HttpServletResponse response,
                         AuthenticationException authException) throws IOException, ServletException {
        ResponseEntity<String> responseEntity = ResponseEntity.ok().body("认证失败,请重新登录");
        String str = JSONUtil.toJsonStr(responseEntity);
        // write the error response
        extracted(response, str);
    }

    private static void extracted(HttpServletResponse response, String string) throws IOException {
        response.setStatus(200);
        response.setContentType("application/json");
        response.setCharacterEncoding("utf-8");
        response.getWriter().println(string);
    }
}
```

Authorization-failure callback

```java
package com.example.spirngsecutirylearn.handler;

import cn.hutool.json.JSONUtil;
import org.springframework.http.ResponseEntity;
import org.springframework.security.access.AccessDeniedException;
import org.springframework.security.web.access.AccessDeniedHandler;
import org.springframework.stereotype.Component;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;

@Component
public class AccessException implements AccessDeniedHandler {

    @Override
    public void handle(HttpServletRequest request, HttpServletResponse response,
                       AccessDeniedException accessDeniedException) throws IOException, ServletException {
        ResponseEntity<String> responseEntity = ResponseEntity.ok().body("权限不足");
        String str = JSONUtil.toJsonStr(responseEntity);
        // write the error response
        extracted(response, str);
    }

    private static void extracted(HttpServletResponse response, String string) throws IOException {
        response.setStatus(200);
        response.setContentType("application/json");
        response.setCharacterEncoding("utf-8");
        response.getWriter().println(string);
    }
}
```

Enabling annotation-based permission checks and a custom check

```java
@RestController
@RequestMapping("/test")
public class TestController {

    @GetMapping("/hello")
    @PreAuthorize("hasAnyAuthority('test')")
    public ResponseEntity<String> hello() {
        return ResponseEntity.ok("hello server");
    }

    /**
     * custom authority check
     */
    @GetMapping("/hello2")
    @PreAuthorize("@anthony.hasAnyAuthority('test')")
    public ResponseEntity<String> hello2() {
        return ResponseEntity.ok("hello server2");
    }
}
```

Configuration

```java
package com.example.spirngsecutirylearn.cofig;

import com.example.spirngsecutirylearn.filter.JwtFilter;
import com.example.spirngsecutirylearn.handler.AccessException;
import com.example.spirngsecutirylearn.handler.AuthException;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpMethod;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.config.http.SessionCreationPolicy;
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;
import org.springframework.security.web.authentication.logout.LogoutFilter;

import javax.annotation.Resource;

@Configuration
@EnableGlobalMethodSecurity(prePostEnabled = true)
public class SecurityConfig extends WebSecurityConfigurerAdapter {

    @Resource
    JwtFilter jwtFilter;

    @Resource
    AuthException authException;

    @Resource
    AccessException accessException;

    @Override
    @Bean
    public AuthenticationManager authenticationManagerBean() throws Exception {
        return super.authenticationManagerBean();
    }

    @Override
    protected void configure(HttpSecurity httpSecurity) throws Exception {
        httpSecurity
                // disable CSRF because sessions are not used
                .csrf().disable()
                // token based, so no session is needed
                .sessionManagement().sessionCreationPolicy(SessionCreationPolicy.STATELESS)
                .and()
                // filter requests
                .authorizeRequests()
                // allow anonymous access to login (and register / captchaImage if present)
                .antMatchers("/user/login").anonymous()
                // static resources: permitAll means accessible whether logged in or not
                // .antMatchers(HttpMethod.GET, "/", "/*.html", "/**/*.html", "/**/*.css", "/**/*.js", "/profile/**").permitAll()
                // .antMatchers("/swagger-ui.html", "/swagger-resources/**", "/webjars/**", "/*/api-docs", "/druid/**").permitAll()
                // every other request requires authentication
                .anyRequest().authenticated();
        // .and()
        // .headers().frameOptions().disable();

        // add the logout filter
        // httpSecurity.logout().logoutUrl("/logout").logoutSuccessHandler(logoutSuccessHandler);

        // add the JWT filter
        httpSecurity.addFilterBefore(jwtFilter, UsernamePasswordAuthenticationFilter.class);

        // add the CORS filter
        // httpSecurity.addFilterBefore(corsFilter, JwtAuthenticationTokenFilter.class);
        // httpSecurity.addFilterBefore(corsFilter, LogoutFilter.class);

        // custom handlers for authentication and authorization failures
        httpSecurity.exceptionHandling().authenticationEntryPoint(authException).accessDeniedHandler(accessException);
        httpSecurity.cors();
    }

    /**
     * password encoding scheme
     */
    @Bean
    public PasswordEncoder passwordEncoder() {
        return new BCryptPasswordEncoder();
    }

    /**
     * quick test of BCryptPasswordEncoder
     */
    public static void main(String[] args) {
        BCryptPasswordEncoder bCryptPasswordEncoder = new BCryptPasswordEncoder();
        // hash
        String encode = bCryptPasswordEncoder.encode("123456");
        System.out.println(encode);
        // verify
        boolean matches = bCryptPasswordEncoder.matches("123456", encode);
        System.out.println(matches);
    }
}
```

Custom authorization expression bean

```java
@Component("anthony")
public class TestExpress {

    public boolean hasAnyAuthority(String authority) {
        Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
        RedisUser principal = (RedisUser) authentication.getPrincipal();
        // List<String> list = (List<String>) principal.getAuthorities();
        List<SimpleGrantedAuthority> authorities = (List<SimpleGrantedAuthority>) principal.getAuthorities();
        for (SimpleGrantedAuthority simpleGrantedAuthority : authorities) {
            if (simpleGrantedAuthority.getAuthority().equals(authority)) {
                return true;
            }
        }
        return false;
    }
}
```

Finding the Java home path

```bash
# mac
/usr/libexec/java_home -V
```
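SecurityConfig injects a JwtFilter that these notes never show. Below is a hedged sketch of what such a filter could look like, assuming the in-memory map in LoginService stands in for Redis; the header name "token" and the parseJWT helper (the counterpart of the createJWT call used in LoginService, left as a placeholder here) are assumptions, and imports are omitted as in the other snippets.

```java
// A possible shape for the JwtFilter referenced in SecurityConfig (a sketch, not the original class)
@Component
public class JwtFilter extends OncePerRequestFilter {

    @Override
    protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response,
                                    FilterChain filterChain) throws ServletException, IOException {
        String token = request.getHeader("token"); // header name is an assumption
        if (token == null || token.isEmpty()) {
            // no token: let the request through; the authorization rules will reject it if needed
            filterChain.doFilter(request, response);
            return;
        }
        // resolve the username from the token and look the user up in the cache
        String username = parseJWT(token);
        RedisUser redisUser = LoginService.map.get(username);
        if (redisUser != null) {
            // put an already-authenticated token into the SecurityContext
            UsernamePasswordAuthenticationToken authentication =
                    new UsernamePasswordAuthenticationToken(redisUser, null, redisUser.getAuthorities());
            SecurityContextHolder.getContext().setAuthentication(authentication);
        }
        filterChain.doFilter(request, response);
    }

    // placeholder: decode the JWT and return the subject (username); the real
    // implementation depends on how createJWT in LoginService signs the token
    private String parseJWT(String token) {
        throw new UnsupportedOperationException("decode the token the same way createJWT encodes it");
    }
}
```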