Tags: spring, SequoiaDB study notes, java, database
su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
CREATE USER 'metauser'@'%' IDENTIFIED BY 'metauser';
GRANT ALL ON *.* TO 'metauser'@'%';
CREATE DATABASE metastore CHARACTER SET 'latin1' COLLATE 'latin1_bin';
FLUSH PRIVILEGES;
quit;
cat > /opt/apache-hive-1.2.2-bin/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.enable.doAs</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.users.in.admin.role</name>
    <value>root</value>
  </property>
  <property>
    <name>hive.server2.thrift.port</name>
    <value>9073</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
</configuration>
EOF
cp spark-authorizer-2.1.1.jar /opt/apache-hive-1.2.2-bin/auxlib
cp mysql-connector-java-5.1.7-bin.jar /opt/apache-hive-1.2.2-bin/auxlib
export HADOOP_HOME=/opt/hadoop-2.9.2
/opt/apache-hive-1.2.2-bin/bin/schematool -dbType mysql -initSchema
su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
use metastore;
create table DBUSER (dbuser varchar(100), passwd char(50), primary key (dbuser));
insert into DBUSER(dbuser, passwd) values ('root', md5('admin'));
This pre-creates a root user with password 'admin' for the thrift server; to add more users later, run a similar INSERT statement.
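For example, adding one more login (hypothetical user name and password):

insert into DBUSER(dbuser, passwd) values ('sparkuser', md5('sparkpass'));

The trigger below then makes every database created through the metastore owned by the role public rather than by an individual user: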
delimiter ||
create trigger dbs_trigger
before insert on DBS
for each row
begin
set new.OWNER_NAME="public";
set new.OWNER_TYPE="ROLE";
end ||
delimiter ;
cp spark-authorizer-2.1.1.jar /opt/spark/jars
cp mysql-connector-java-5.1.7-bin.jar /opt/spark/jars
cat > /opt/spark/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.security.authorization.createtable.owner.grants</name>
    <value>INSERT,SELECT</value>
  </property>
  <property>
    <name>hive.security.authorization.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
</configuration>
EOF
Then enable the authorizer extension in Spark's configuration (typically appended to /opt/spark/conf/spark-defaults.conf):
spark.sql.extensions=org.apache.ranger.authorization.spark.authorizer.SequoiadbSparkSQLExtension
/opt/spark/sbin/start-all.sh
/opt/spark/sbin/start-thriftserver.sh
netstat -anp|grep 10000
./bin/beeline -u jdbc:hive2://localhost:10000 -n root -p admin
When a table is created in Spark SQL, the user who executed the CREATE TABLE gets INSERT and SELECT privileges on it. For any other user to access the table, run GRANT statements against the Hive thrift server to give that user the corresponding privileges, as shown below.
${HIVE_HOME}/bin/hiveserver2 >${HIVE_HOME}/hive_thriftserver.log 2>&1 &
./bin/beeline -u jdbc:hive2://localhost:9073 -n root -p admin
set role admin;
grant SELECT on table test to user USERNAME;
grant INSERT on table test to user USERNAME;
var db = new Sdb("localhost", 11810);
db.createDomain("scottdomain", ["datagroup1", "datagroup2", "datagroup3"], {AutoSplit: true});
db.createCS("scott", {Domain: "scottdomain"});
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
create database scott;
use scott;
create table emp(
empno int unsigned auto_increment primary key COMMENT 'employee number',
ename varchar(15) COMMENT 'employee name',
job varchar(10) COMMENT 'job title',
mgr int unsigned COMMENT 'employee number of the manager',
hiredate date COMMENT 'hire date',
sal decimal(7,2) COMMENT 'base salary',
comm decimal(7,2) COMMENT 'bonus',
deptno int unsigned COMMENT 'department number'
)ENGINE = sequoiadb COMMENT = "employee table, sequoiadb: { table_options: { ShardingKey: { 'empno': 1 }, ShardingType: 'hash', 'Compressed': true, 'CompressionType': 'lzw', 'AutoSplit': true, 'EnsureShardingIndex': false } }";
INSERT INTO emp VALUES (7369,'SMITH','CLERK',7902,'1980-12-17',800,NULL,20);
INSERT INTO emp VALUES (7499,'ALLEN','SALESMAN',7698,'1981-2-20',1600,300,30);
INSERT INTO emp VALUES (7521,'WARD','SALESMAN',7698,'1981-2-22',1250,500,30);
INSERT INTO emp VALUES (7566,'JONES','MANAGER',7839,'1981-4-2',2975,NULL,20);
INSERT INTO emp VALUES (7654,'MARTIN','SALESMAN',7698,'1981-9-28',1250,1400,30);
INSERT INTO emp VALUES (7698,'BLAKE','MANAGER',7839,'1981-5-1',2850,NULL,30);
INSERT INTO emp VALUES (7782,'CLARK','MANAGER',7839,'1981-6-9',2450,NULL,10);
INSERT INTO emp VALUES (7788,'SCOTT','ANALYST',7566,'1987-7-13',3000,NULL,20);
INSERT INTO emp VALUES (7839,'KING','PRESIDENT',NULL,'1981-11-17',5000,NULL,10);
INSERT INTO emp VALUES (7844,'TURNER','SALESMAN',7698,'1981-9-8',1500,100,30);
INSERT INTO emp VALUES (7876,'ADAMS','CLERK',7788,'1987-7-13',1100,NULL,20);
INSERT INTO emp VALUES (7900,'JAMES','CLERK',7698,'1981-12-3',950,NULL,30);
INSERT INTO emp VALUES (7902,'FORD','ANALYST',7566,'1981-12-3',3000,NULL,20);
INSERT INTO emp VALUES (7934,'MILLER','CLERK',7782,'1982-1-23',1300,NULL,10);
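With the sample rows in place, a quick verification from the same MySQL shell (a minimal sketch):

select count(*) from emp;
select ename, sal, deptno from emp where deptno = 20;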
<!-- Druid connection pool dependencies -->
<dependency>
  <groupId>com.alibaba</groupId>
  <artifactId>fastjson</artifactId>
  <version>1.2.58</version>
</dependency>
<dependency>
  <groupId>com.alibaba</groupId>
  <artifactId>druid-spring-boot-starter</artifactId>
  <version>1.1.18</version>
</dependency>
<!-- MyBatis dependencies -->
<dependency>
  <groupId>com.baomidou</groupId>
  <artifactId>mybatis-plus-core</artifactId>
  <version>${mybatis-plus.version}</version>
</dependency>
<dependency>
  <groupId>com.baomidou</groupId>
  <artifactId>mybatis-plus-extension</artifactId>
  <version>${mybatis-plus.version}</version>
</dependency>
<!-- Dependencies for connecting to Spark over JDBC -->
<dependency>
  <groupId>commons-logging</groupId>
  <artifactId>commons-logging</artifactId>
  <version>1.1.3</version>
</dependency>
<dependency>
  <groupId>org.apache.hive</groupId>
  <artifactId>hive-exec</artifactId>
  <version>1.2.1</version>
</dependency>
<dependency>
  <groupId>org.apache.hive</groupId>
  <artifactId>hive-metastore</artifactId>
  <version>1.2.1</version>
</dependency>
<dependency>
  <groupId>org.apache.httpcomponents</groupId>
  <artifactId>httpclient</artifactId>
  <version>4.5.2</version>
</dependency>
<dependency>
  <groupId>org.apache.httpcomponents</groupId>
  <artifactId>httpcore</artifactId>
  <version>4.4.4</version>
</dependency>
<dependency>
  <groupId>org.apache.thrift</groupId>
  <artifactId>libthrift</artifactId>
  <version>0.9.2</version>
</dependency>
<dependency>
  <groupId>log4j</groupId>
  <artifactId>log4j</artifactId>
  <version>1.2.17</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
<dependency>
  <groupId>org.slf4j</groupId>
  <artifactId>slf4j-api</artifactId>
  <version>1.7.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
<dependency>
  <groupId>org.slf4j</groupId>
  <artifactId>slf4j-log4j12</artifactId>
  <version>1.7.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-hive-thriftserver -->
<dependency>
  <groupId>org.apache.spark</groupId>
  <artifactId>spark-hive-thriftserver_2.11</artifactId>
  <version>2.0.1</version>
  <scope>provided</scope>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-network-common -->
<dependency>
  <groupId>org.apache.spark</groupId>
  <artifactId>spark-network-common_2.11</artifactId>
  <version>2.0.1</version>
</dependency>
<!-- SequoiaDB driver -->
<dependency>
  <groupId>com.sequoiadb</groupId>
  <artifactId>sequoiadb-driver</artifactId>
  <version>3.2.1</version>
</dependency>
<!-- SequoiaDB-Spark connector -->
<dependency>
  <groupId>com.sequoiadb</groupId>
  <artifactId>spark-sequoiadb_2.11</artifactId>
  <version>2.8.0</version>
</dependency>
<dependency>
  <groupId>com.sequoiadb</groupId>
  <artifactId>spark-sequoiadb-scala_2.11.2</artifactId>
  <version>1.12</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-sql -->
<dependency>
  <groupId>org.apache.spark</groupId>
  <artifactId>spark-sql_2.11</artifactId>
  <version>2.2.2</version>
</dependency>
<!-- Hive JDBC client -->
<dependency>
  <groupId>org.apache.hive</groupId>
  <artifactId>hive-jdbc</artifactId>
  <version>1.2.1</version>
</dependency>
<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-common</artifactId>
  <version>3.2.0</version>
</dependency>
server.port=8090
#datasource config
#connection pool type
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
#JDBC driver
spring.datasource.driver-class-name=org.apache.hive.jdbc.HiveDriver
#connection URL, user name and password
spring.datasource.url=jdbc:hive2://192.168.80.132:10000/default
spring.datasource.username=root
spring.datasource.password=admin
#initial number of connections
spring.datasource.druid.initialSize=1
#minimum number of idle connections
spring.datasource.druid.minIdle=5
#maximum number of active connections
spring.datasource.druid.maxActive=20
#maximum time (ms) to wait when acquiring a connection
spring.datasource.druid.maxWait=60000
#interval (ms) between eviction runs that close idle connections
spring.datasource.druid.timeBetweenEvictionRunsMillis=60000
#minimum time (ms) a connection must stay idle before it can be evicted
spring.datasource.druid.minEvictableIdleTimeMillis=300000
#SQL used to validate connections; must be a query statement
spring.datasource.druid.validation-query=SELECT 1
#mybatis
#mybatis-plus.mapper-locations=classpath:mapper/*.xml
#mybatis-plus.configuration.cache-enabled=false
#location of the mapper XML files
mybatis.mapper-locations=classpath:mapper/*.xml
#package to scan for entity classes
mybatis.type-aliases-package=com.sdb.spark.demo.entity
#Spring MVC static resources
spring.mvc.static-path-pattern=/static/**
#devtools hot reload
spring.devtools.restart.enabled=true
spring.devtools.restart.additional-paths=src/main/java
spring.devtools.restart.exclude=WEB-INF/**
/**
 * Employee entity.
 *
 * @author yousongxian
 * @date 2020-07-29
 */
public class Emp {
    private Integer empno;    // employee number
    private String ename;     // employee name
    private String job;       // job title
    private Integer mgr;      // employee number of the manager
    private String hiredate;  // hire date
    private Double sal;       // base salary
    private Double comm;      // bonus
    private Integer deptno;   // department number

    // getters and setters omitted

    @Override
    public String toString() {
        return "Emp{" +
                "empno=" + empno +
                ", ename='" + ename + '\'' +
                ", job='" + job + '\'' +
                ", mgr=" + mgr +
                ", hiredate='" + hiredate + '\'' +
                ", sal=" + sal +
                ", comm=" + comm +
                ", deptno=" + deptno +
                '}';
    }
}
<!-- Generic query result mapping -->
<resultMap id="BaseResultMap" type="com.sdb.spark.demo.entity.Emp">
  <id column="empno" property="empno"/>
  <result column="ename" property="ename"/>
  <result column="job" property="job"/>
  <result column="mgr" property="mgr"/>
  <result column="hiredate" property="hiredate"/>
  <result column="sal" property="sal"/>
  <result column="comm" property="comm"/>
  <result column="deptno" property="deptno"/>
</resultMap>
<!-- Generic query returning all columns -->
<select id="selectAll" resultType="map" parameterType="string">
  select * from ${tablename}
</select>
<update id="createTableEmp">
CREATE TABLE emp
(
empno INT,
ename STRING,
job STRING,
mgr INT,
hiredate date,
sal decimal(7,2),
comm decimal(7,2),
deptno INT
)
USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)</update>
<update id="createTableEmpSchema">
CREATE TABLE emp_schema USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)
</update>
<update id="createTableAsSelect" parameterType="map">
CREATE TABLE ${tablename} USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
domain 'scottdomain',
collectionspace 'scott',
collection #{tablename},
shardingkey '{"_id":1}',
shadingtype 'hash',
autosplit true
)AS ${condition}
</update>
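The interfaces below also declare insertEmp, whose XML mapping is not shown in the article; a minimal sketch, assuming Spark SQL's INSERT INTO ... VALUES syntax and the Emp property names from the entity above:

<insert id="insertEmp" parameterType="com.sdb.spark.demo.entity.Emp">
  INSERT INTO emp VALUES (#{empno}, #{ename}, #{job}, #{mgr}, #{hiredate}, #{sal}, #{comm}, #{deptno})
</insert>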
<mapper namespace="com.sdb.spark.demo.mapper.EmpMapper">
Note: the method names in the EmpMapper interface must match the statement ids defined in the mapper XML file.

List<Map<String,Object>> selectAll(String tablename);  // query all rows
int createTableEmp();                                  // create the emp table
int createTableEmpSchema();                            // create emp_schema, inferring the schema automatically
int createTableAsSelect(Map<String,String> map);       // create emp_as_select from a query result
int insertEmp(Emp emp);                                // insert a record into emp
The EmpService interface declares the same methods:

List<Map<String,Object>> selectAll(String tablename);
int createTableEmp();
int createTableEmpSchema();                            // create emp_schema, inferring the schema automatically
int createTableAsSelect(Map<String,String> map);       // create emp_as_select from a query result
int insertEmp(Emp emp);                                // insert a record into emp
@Service
public class EmpServiceImpl implements EmpService {

    @Autowired
    private EmpMapper empMapper;

    @Override
    public List<Map<String, Object>> selectAll(String tablename) {
        return empMapper.selectAll(tablename);
    }

    @Override
    public int createTableEmp() {
        return empMapper.createTableEmp();
    }

    @Override
    public int createTableEmpSchema() {
        return empMapper.createTableEmpSchema();
    }

    @Override
    public int createTableAsSelect(Map<String, String> map) {
        return empMapper.createTableAsSelect(map);
    }

    @Override
    public int insertEmp(Emp emp) {
        return empMapper.insertEmp(emp);
    }
}
@Autowired
private EmpService empService;

@Test
public void selectAll() {
    String tablename = "emp";
    List<Map<String, Object>> resultlist = empService.selectAll(tablename);
    for (Map<String, Object> map : resultlist) {
        for (Map.Entry<String, Object> m : map.entrySet()) {
            System.out.print(m.getKey() + "=" + m.getValue() + "\t");
        }
        System.out.println();
    }
}

@Test
public void createTable() {
    empService.createTableEmp();
}

@Test
public void createTableEmpSchema() {
    empService.createTableEmpSchema();
}

@Test
public void createTableAsSelect() {
    Map<String, String> map = new HashMap<String, String>();
    String tablename = "emp_as_select";
    String condition = "select empno,ename from emp";
    map.put("tablename", tablename);
    map.put("condition", condition);
    if (map.get("tablename") == null || map.get("tablename").equals("")
            || map.get("condition") == null || map.get("condition").equals("")) {
        System.out.println("Please supply a valid table name and query");
    } else {
        empService.createTableAsSelect(map);
    }
}
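There is no test above for insertEmp; a minimal sketch, assuming the standard setters omitted from the Emp listing and using hypothetical sample values:

@Test
public void insertEmp() {
    Emp emp = new Emp();
    emp.setEmpno(8000);            // hypothetical sample record
    emp.setEname("TEST");
    emp.setJob("CLERK");
    emp.setMgr(7902);
    emp.setHiredate("2020-07-29");
    emp.setSal(1000.0);
    emp.setComm(0.0);
    emp.setDeptno(20);
    empService.insertEmp(emp);
}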
@SpringBootApplication(scanBasePackages = {"com.sdb.spark.demo.service.Impl"})
@MapperScan(basePackages = {"com.sdb.spark.demo.mapper"})
public class DemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}