Dubbo generic invocation

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import com.alibaba.dubbo.config.ApplicationConfig;
import com.alibaba.dubbo.config.ConsumerConfig;
import com.alibaba.dubbo.config.ReferenceConfig;
import com.alibaba.dubbo.config.RegistryConfig;
import com.alibaba.dubbo.rpc.service.GenericService;
import com.ilovs.partner.dao.paretner.model.GatewayServiceInfo;

public class DubboUtil {

    private static String appName;
    private static String registryAddress;
    private static Integer timeout;

    // cache one GenericService reference per gateway service id
    private static Map<Integer, GenericService> services =
            new ConcurrentHashMap<Integer, GenericService>();

    public static GenericService getService(GatewayServiceInfo gatewayServiceInfo) {
        GenericService service = services.get(gatewayServiceInfo.getId());
        if (service != null) {
            return service;
        }

        ApplicationConfig application = new ApplicationConfig();
        application.setName(appName);

        RegistryConfig registry = new RegistryConfig();
        registry.setAddress(registryAddress);

        ConsumerConfig consumer = new ConsumerConfig();
        consumer.setTimeout(timeout);
        consumer.setRetries(0);

        // build a generic reference from the service metadata stored in the gateway
        ReferenceConfig<GenericService> referenceConfig = new ReferenceConfig<GenericService>();
        referenceConfig.setApplication(application);
        referenceConfig.setRegistry(registry);
        referenceConfig.setConsumer(consumer);
        referenceConfig.setInterface(gatewayServiceInfo.getInvokeInterface());
        referenceConfig.setVersion(gatewayServiceInfo.getVersion());
        referenceConfig.setGeneric(true);

        service = referenceConfig.get();
        if (service != null) {
            services.put(gatewayServiceInfo.getId(), service);
        }
        return service;
    }

    public void setAppName(String appName) {
        DubboUtil.appName = appName;
    }

    public void setRegistryAddress(String registryAddress) {
        DubboUtil.registryAddress = registryAddress;
    }

    public void setTimeout(Integer timeout) {
        DubboUtil.timeout = timeout;
    }
}

import java.io.Serializable;
import java.util.Date;
/**
 * 
 * 
 * @author haibo.he
 * @version $Id: GatewayServiceInfo.java, v 0.1 2016年4月3日 下午6:48:21 haibo.he Exp $
 */
public class GatewayServiceInfo implements Serializable {

    private static final long serialVersionUID = 3323918939039527640L;

    /**
     * Service id
     */
    private int id;

    /**
     * Service name
     */
    private String serviceName;

    /**
     * URL used to reach the service.
     * The scheme identifies the invocation protocol, host+port+path identify the address,
     * and userInfo carries the authentication credentials.
     * Examples:
     * local call (new class instance and call)         class://com.alibaba.foo.Bar
     * call a bean from the Spring BeanFactory          beanfactory://fooBar
     * web service remote call                          webservices://192.168.0.1:81/foo/bar
     * hessian remote call                              hessian://192.168.0.1:82/foo
     * RMI remote call                                  rmi://192.168.0.1/foo
     */
    private String url;

    /**
     * Name of the interface to invoke (the object interface name).
     * Only needs to be specified when the protocol requires an interface, such as RMI.
     * Format: com.alibaba.test.rmi.TestInterface
     */
    private String invokeInterface;

    /**
     * Descriptor of the method to invoke; not necessarily the same as tb_api_registry.name
     */
    private String invokeMethod;

    /**
     * Version number, an unsigned integer starting at 1 and incremented by 1 on each change
     */
    private String version;

    /**
     * Status: O -- normal, N -- not enabled, S -- suspended, I -- expired
     */
    private String status;

    /**
     * Whether to verify the signature of incoming requests
     */
    private boolean isValidateSignIn;

    /**
     * Whether to sign outgoing responses
     */
    private boolean isEndorseSignOut;

    /**
     * Creation time
     */
    private Date gmtCreated;

    /**
     * Last modification time
     */
    private Date gmtModified;

    /**
     * Creator
     */
    private String created;

    /**
     * Last modifier
     */
    private String modified;

    /**
     * Whether the interface is public; public interfaces skip the user permission check
     */
    private boolean isPublic;

 public int getId() {
 return id;
 }

public void setId(int id) {
 this.id = id;
 }

public String getServiceName() {
 return serviceName;
 }

public void setServiceName(String serviceName) {
 this.serviceName = serviceName;
 }

public String getUrl() {
 return url;
 }

public void setUrl(String url) {
 this.url = url;
 }

public String getInvokeInterface() {
 return invokeInterface;
 }

public void setInvokeInterface(String invokeInterface) {
 this.invokeInterface = invokeInterface;
 }

public String getInvokeMethod() {
 return invokeMethod;
 }

public void setInvokeMethod(String invokeMethod) {
 this.invokeMethod = invokeMethod;
 }

public String getVersion() {
 return version;
 }

public void setVersion(String version) {
 this.version = version;
 }

public String getStatus() {
 return status;
 }

public void setStatus(String status) {
 this.status = status;
 }

public boolean isValidateSignIn() {
 return isValidateSignIn;
 }
 
 public void setValidateSignIn(boolean isValidateSignIn) {
 this.isValidateSignIn = isValidateSignIn;
 }

public boolean isEndorseSignOut() {
 return isEndorseSignOut;
 }

public void setEndorseSignOut(boolean isEndorseSignOut) {
 this.isEndorseSignOut = isEndorseSignOut;
 }

public Date getGmtCreated() {
 return gmtCreated;
 }

public void setGmtCreated(Date gmtCreated) {
 this.gmtCreated = gmtCreated;
 }

public Date getGmtModified() {
 return gmtModified;
 }

public void setGmtModified(Date gmtModified) {
 this.gmtModified = gmtModified;
 }

public String getCreated() {
 return created;
 }

public void setCreated(String created) {
 this.created = created;
 }

public String getModified() {
 return modified;
 }

public void setModified(String modified) {
 this.modified = modified;
 }

public boolean isPublic() {
 return isPublic;
 }
 
 public void setPublic(boolean isPublic) {
 this.isPublic = isPublic;
 }
}



2 Dubbo invocation example

GenericService genericService = DubboUtil.getService(gatewayServiceInfo);

Object invokedResult = genericService.$invoke(gatewayServiceInfo.getInvokeMethod(), paramTypes, paramValues);
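
To make the example concrete, here is a minimal sketch of how the paramTypes/paramValues arrays might be built for a hypothetical provider method sayHello(String name) (the method name and argument are made up for illustration):

// hypothetical provider-side method: public String sayHello(String name)
String[] paramTypes  = new String[] { "java.lang.String" };  // fully-qualified parameter type names
Object[] paramValues = new Object[] { "world" };             // arguments in the same order

GenericService genericService = DubboUtil.getService(gatewayServiceInfo);
// $invoke(methodName, parameterTypes, arguments) is the single entry point of GenericService;
// POJO parameters and results are represented as Maps in generic mode.
Object invokedResult = genericService.$invoke("sayHello", paramTypes, paramValues);
System.out.println(invokedResult);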

MySQL 5.7 sql_mode only_full_group_by

1 MySQL official documentation and related references:

https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html

https://blog.csdn.net/kk185800961/article/details/79426041

https://blog.csdn.net/wangyunfeis/article/details/77911704

2 The SQL being executed:

select t3.attribute_value_id,
 t3.attribute_value_code
 from category_rel_attribute t1
 INNER JOIN
 category_rel_attribute_rel_value t2 on t1.category_attribute_id=
 t2.category_attribute_id
 INNER JOIN
 category_attribute_value_code_config t3 on
 t2.category_attribute_value_id= t3.attribute_value_id
 where
 t1.category_id= 1
 and t1.is_sku_attr= 'Y'
 GROUP BY
 t2.category_attribute_value_id

3 The error

Expression #2 of SELECT list is not in GROUP BY 
clause and contains nonaggregated column 
'pms.t3.attribute_value_code' which is not functionally 
dependent on columns in GROUP BY clause; this is 
incompatible with sql_mode=only_full_group_by

4 select @@sql_mode

 

5 Remove ONLY_FULL_GROUP_BY

mysql> set sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION';

Query OK, 0 rows affected (0.01 sec)

Note that this only changes the current session. To apply it to all connections, use SET GLOBAL sql_mode=... or set sql-mode in my.cnf; otherwise the mode is back for new connections or after a server restart.

Introduction to lombok

1 What lombok is
lombok provides simple annotations that help eliminate the necessary but bulky boilerplate in Java code, such as getters and setters for JavaBean properties, the no-argument constructor, the all-arguments constructor, and so on.
2 Using lombok
lombok is annotation-driven; the most commonly used annotations are:
@Data: on a class; generates getters and setters for all fields, plus equals, canEqual, hashCode and toString.
@Setter: on a field; generates a setter for that field.
@Getter: on a field; generates a getter for that field.
@Log4j: on a class; provides a log4j logger field named log.
@NoArgsConstructor: on a class; generates a no-argument constructor.
@AllArgsConstructor: on a class; generates a constructor taking all fields.
A short example of the constructor and logging annotations follows below.
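To illustrate the constructor and logging annotations (an illustrative sketch; the class name and fields are made up, not taken from the project above):

import java.io.Serializable;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.log4j.Log4j;

@Data                 // getters/setters, equals, canEqual, hashCode, toString
@NoArgsConstructor    // generates CarBrandDO()
@AllArgsConstructor   // generates CarBrandDO(String brand, String brandCode)
@Log4j                // generates a "private static final Logger log = Logger.getLogger(CarBrandDO.class);" field
public class CarBrandDO implements Serializable {
    private static final long serialVersionUID = 1L;
    private String brand;
    private String brandCode;

    public void printMe() {
        log.info(this); // the log field comes from @Log4j
    }
}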
2.1 Without lombok
import java.io.Serializable;
import java.util.Date;

public class CarModelDO implements Serializable {

    private static final long serialVersionUID = 1L;

    private String id;
    private String brand;
    private String brandCode;
    private String series;
    private String seriesCode;
    private String seriesRemark;
    private int factor_1;
    private Date dtCreate;
    private Date dtUpdate;

    public String getId() {
        return id;
    }
    public void setId(String id) {
        this.id = id;
    }
    public String getBrand() {
        return brand;
    }
    public void setBrand(String brand) {
        this.brand = brand;
    }
    public String getBrandCode() {
        return brandCode;
    }
    public void setBrandCode(String brandCode) {
        this.brandCode = brandCode;
    }
    public String getSeries() {
        return series;
    }
    public void setSeries(String series) {
        this.series = series;
    }
    public String getSeriesCode() {
        return seriesCode;
    }
    public void setSeriesCode(String seriesCode) {
        this.seriesCode = seriesCode;
    }
    public String getSeriesRemark() {
        return seriesRemark;
    }
    public void setSeriesRemark(String seriesRemark) {
        this.seriesRemark = seriesRemark;
    }
    public int getFactor_1() {
        return factor_1;
    }
    public void setFactor_1(int factor_1) {
        this.factor_1 = factor_1;
    }
    public Date getDtCreate() {
        return dtCreate;
    }
    public void setDtCreate(Date dtCreate) {
        this.dtCreate = dtCreate;
    }
    public Date getDtUpdate() {
        return dtUpdate;
    }
    public void setDtUpdate(Date dtUpdate) {
        this.dtUpdate = dtUpdate;
    }
}
As you can see, this is the conventional hand-written version.
2.2 With lombok
 
 
import java.io.Serializable;
import java.util.Date;

import lombok.Data;

@Data
public class CarModelDO implements Serializable {
    private String id;
    private String brand;
    private String brandCode;
    private String series;
    private String seriesCode;
    private String seriesRemark;
    private int factor_1;
    private Date dtCreate;
    private Date dtUpdate;
}
 
The decompiled code:
public class CarModelDO implements Serializable {
   private String id;
   private String brand;
   private String brandCode;
   private String series;
   private String seriesCode;
   private String seriesRemark;
   private int factor_1;
   private Date dtCreate;
   private Date dtUpdate;
   public String toString() {
      return "CarModelDO(id=" + getId() + ", brand=" + getBrand() + ", brandCode=" + getBrandCode() + ", series="
            + getSeries() + ", seriesCode=" + getSeriesCode() + ", seriesRemark=" + getSeriesRemark()
            + ", factor_1=" + getFactor_1() + ", dtCreate=" + getDtCreate() + ", dtUpdate=" + getDtUpdate() + ")";
   }
   public int hashCode() {
      int PRIME = 59;
      int result = 1;
      Object $id = getId();
      result = result * 59 + ($id == null ? 43 : $id.hashCode());
      Object $brand = getBrand();
      result = result * 59 + ($brand == null ? 43 : $brand.hashCode());
      Object $brandCode = getBrandCode();
      result = result * 59 + ($brandCode == null ? 43 : $brandCode.hashCode());
      Object $series = getSeries();
      result = result * 59 + ($series == null ? 43 : $series.hashCode());
      Object $seriesCode = getSeriesCode();
      result = result * 59 + ($seriesCode == null ? 43 : $seriesCode.hashCode());
      Object $seriesRemark = getSeriesRemark();
      result = result * 59 + ($seriesRemark == null ? 43 : $seriesRemark.hashCode());
      result = result * 59 + getFactor_1();
      Object $dtCreate = getDtCreate();
      result = result * 59 + ($dtCreate == null ? 43 : $dtCreate.hashCode());
      Object $dtUpdate = getDtUpdate();
      result = result * 59 + ($dtUpdate == null ? 43 : $dtUpdate.hashCode());
      return result;
   }
   protected boolean canEqual(Object other) {
      return other instanceof CarModelDO;
   }
   public boolean equals(Object o) {
      if (o == this) {
         return true;
      }
      if (!(o instanceof CarModelDO)) {
         return false;
      }
      CarModelDO other = (CarModelDO) o;
      if (!other.canEqual(this)) {
         return false;
      }
      Object this$id = getId();
      Object other$id = other.getId();
      if (this$id == null ? other$id != null : !this$id.equals(other$id)) {
         return false;
      }
      Object this$brand = getBrand();
      Object other$brand = other.getBrand();
      if (this$brand == null ? other$brand != null : !this$brand.equals(other$brand)) {
         return false;
      }
      Object this$brandCode = getBrandCode();
      Object other$brandCode = other.getBrandCode();
      if (this$brandCode == null ? other$brandCode != null : !this$brandCode.equals(other$brandCode)) {
         return false;
      }
      Object this$series = getSeries();
      Object other$series = other.getSeries();
      if (this$series == null ? other$series != null : !this$series.equals(other$series)) {
         return false;
      }
      Object this$seriesCode = getSeriesCode();
      Object other$seriesCode = other.getSeriesCode();
      if (this$seriesCode == null ? other$seriesCode != null : !this$seriesCode.equals(other$seriesCode)) {
         return false;
      }
      Object this$seriesRemark = getSeriesRemark();
      Object other$seriesRemark = other.getSeriesRemark();
      if (this$seriesRemark == null ? other$seriesRemark != null : !this$seriesRemark.equals(other$seriesRemark)) {
         return false;
      }
      if (getFactor_1() != other.getFactor_1()) {
         return false;
      }
      Object this$dtCreate = getDtCreate();
      Object other$dtCreate = other.getDtCreate();
      if (this$dtCreate == null ? other$dtCreate != null : !this$dtCreate.equals(other$dtCreate)) {
         return false;
      }
      Object this$dtUpdate = getDtUpdate();
      Object other$dtUpdate = other.getDtUpdate();
      return this$dtUpdate == null ? other$dtUpdate == null : this$dtUpdate.equals(other$dtUpdate);
   }
   public void setDtUpdate(Date dtUpdate) {
      this.dtUpdate = dtUpdate;
   }
   public void setDtCreate(Date dtCreate) {
      this.dtCreate = dtCreate;
   }
   public void setFactor_1(int factor_1) {
      this.factor_1 = factor_1;
   }
   public void setSeriesRemark(String seriesRemark) {
      this.seriesRemark = seriesRemark;
   }
   public void setSeriesCode(String seriesCode) {
      this.seriesCode = seriesCode;
   }
   public void setSeries(String series) {
      this.series = series;
   }
   public void setBrandCode(String brandCode) {
      this.brandCode = brandCode;
   }
   public void setBrand(String brand) {
      this.brand = brand;
   }
   public void setId(String id) {
      this.id = id;
   }
   public Date getDtUpdate() {
      return this.dtUpdate;
   }
   public Date getDtCreate() {
      return this.dtCreate;
   }
   public int getFactor_1() {
      return this.factor_1;
   }
   public String getSeriesRemark() {
      return this.seriesRemark;
   }
   public String getSeriesCode() {
      return this.seriesCode;
   }
   public String getSeries() {
      return this.series;
   }
   public String getBrandCode() {
      return this.brandCode;
   }
   public String getBrand() {
      return this.brand;
   }
   public String getId() {
      return this.id;
   }
}
 
This is equivalent to the code in 2.1, and lombok also generates toString, hashCode, equals and canEqual for us.
The other annotations are used the same way as above.
2.3 How to use it in development
1 Add lombok.jar to the compile classpath; with Maven:
<dependency>
<groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <version>1.16.8</version>
    <scope>provided</scope>
 </dependency>
2 Add the annotations to the JavaBean classes that need them.
3 After compilation the generated methods are present in the resulting bytecode, as shown by the decompiled code above.
3 Installing lombok for Eclipse on a Mac
Eclipse compiles Java source files with the Eclipse Compiler for Java (ECJ). To make ECJ support lombok it has to be configured: add the following two lines to the eclipse.ini inside Eclipse.app in the Eclipse installation directory. On my machine that means appending these two lines to /usr/local/eclipse-jee/Eclipse.app/Contents/MacOS/eclipse.ini:
-Xbootclasspath/a:lombok.jar
-javaagent:/usr/local/eclipse-jee/lombok.jar
IntelliJ IDEA can simply use the Oracle javac as its compiler.
4 How it works
On 2005-02-01 Sun submitted JSR 269 to support processing annotations at compile time ("this JSR will define APIs to allow annotation processors to be created using a standard pluggable API. This will simplify the task of creating annotation processors and will also allow automation of the discovery of appropriate annotation processors for a given source file.").
Since Java 6, javac supports the "JSR 269 Pluggable Annotation Processing API": any program that implements this API gets called while javac runs. Lombok is an implementation of this specification. Spec: https://jcp.org/en/jsr/detail?id=269
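For reference, a minimal JSR 269 processor looks roughly like the sketch below (illustrative only; lombok itself goes much further and rewrites the compiler's syntax tree). The @com.example.LogMe annotation is made up for the example; a real processor is registered via META-INF/services/javax.annotation.processing.Processor so that javac can discover it.

import java.util.Set;

import javax.annotation.processing.AbstractProcessor;
import javax.annotation.processing.RoundEnvironment;
import javax.annotation.processing.SupportedAnnotationTypes;
import javax.annotation.processing.SupportedSourceVersion;
import javax.lang.model.SourceVersion;
import javax.lang.model.element.Element;
import javax.lang.model.element.TypeElement;
import javax.tools.Diagnostic;

@SupportedAnnotationTypes("com.example.LogMe")   // hypothetical annotation
@SupportedSourceVersion(SourceVersion.RELEASE_6)
public class LogMeProcessor extends AbstractProcessor {

    @Override
    public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
        for (TypeElement annotation : annotations) {
            for (Element element : roundEnv.getElementsAnnotatedWith(annotation)) {
                // a real processor would generate code here (e.g. via the Filer);
                // this sketch only emits a compile-time note
                processingEnv.getMessager().printMessage(
                        Diagnostic.Kind.NOTE, "Found @LogMe on " + element.getSimpleName());
            }
        }
        return false; // do not claim the annotation, so other processors can still see it
    }
}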
A brief overview of how the Oracle JDK (javac) compiles Java source code:
1. Parse and Enter
   The Parse step does lexical and syntactic analysis: lexical analysis turns the source characters into a token sequence, and syntactic analysis builds an abstract syntax tree from the tokens according to the grammar.
   The Enter step fills the symbol tables: it determines each class's supertypes and interfaces, adds default constructors where needed, and enters the symbols declared in a class into the class's own symbol table.
2. Annotation Processing
   This step processes user-defined annotations. Its benefit is that extra code can be generated, or special checks performed, based on annotations, saving repetitive boilerplate. The facility is based on JSR 269 and has been supported since Sun JDK 6. When annotation processing finishes, compilation returns to the previous step.
3. Analyse and Generate
   This step performs a series of semantic analyses on the abstract syntax tree: binding names and expressions to variables, methods and types; checking that variables are declared before use; inferring type arguments of generic methods; checking type compatibility; constant folding; checking that every statement is reachable; checking that every checked exception is caught or declared; definite-assignment analysis (for example, a method with a return type must definitely return a value); definite-unassignment checks (for example, for variables declared final); and desugaring (removing dead code such as if(false){…}, erasing generics back to plain Java, and rewriting syntactic sugar such as the foreach loop and autoboxing/unboxing into simpler language constructs).
    After semantic analysis the class file is generated: first, instance initializers are collected into the constructors and static initializers into <clinit>(); then bytecode is generated from the abstract syntax tree by a post-order traversal, with a few final code transformations (for example, String concatenation is turned into StringBuilder operations); finally the class file is written out from the symbol table.
4. Class file contents
   A class file does not only hold bytecode; it also holds a lot of auxiliary information that helps the JVM execute the class. A class file contains:
    1) Structural information
     The class file format version and the counts and sizes of its sections.
    2) Metadata
     Roughly, the information corresponding to the "declarations" and "constants" in the Java source: the class, its superclass, the interfaces it implements, field and method declarations, and the constant pool.
    3) Method information
      The information corresponding to the "statements" and "expressions" in the Java source: bytecode, exception handler tables, operand stack and local variable sizes, stack type records, and debug symbol information.

Reference structure for a large project

Based on past project experience I have put together a project structure diagram for reference; comments and discussion are welcome.
1 Project diagram preview
2 Module descriptions
For clarity, every dependency in the diagram is drawn as a direct dependency; the whole project is organised as a Maven project.
Note: Maven dependencies are transitive. For example with A->B->C->D, A depends directly on B, while its dependencies on C and D are transitive, i.e. A effectively depends on modules C and D as well.
common module:
the shared module of the whole project; it depends on no other module and mainly provides utility classes, logging helpers, and the like.
dao module (database access object): mainly accesses the database and contains the database DOs (database objects), usually one access class per table. It may depend on the model module, which declares the domain model.
remoting module:
behaves much like a data source; it calls external interfaces over RPC or HTTP, for example dubbo (Alibaba's SOA service governance framework) or plain HTTP, and can also call external web service interfaces, access MongoDB, and so on.
Model module: the model layer, mainly the domain objects declared for the externally exposed service interfaces; it can be used inside the project as well.
Service module: the service interface declaration layer, i.e. the declarations of the externally exposed service interfaces. With dubbo this is very convenient: callers only need to depend on this component. The module is usually published to the company's internal Maven repository as a third-party library, and it depends on the model module for its return values and parameter objects (custom classes). Why separate service and model? Because when a service interface changes (upgrade, downgrade, retirement), often the interface itself does not need to change; instead one or a few fields are added to the return value or to a parameter object (a custom class), while the interface signature stays untouched.
Biz-common module: common business functionality, such as logging interceptors, permission checks and shared MQ access, plus shared business features such as reading unified user and permission information. It is not mandatory and mostly pays off in large business projects.
Biz-module1: a concrete business module, for example implementing the interfaces declared in the service module and accessing the database and remote interfaces.
Biz-module2: similar in role to biz-module1. In large projects the work has to be split into many business modules and sub-modules; decide according to the project's business situation.
Web module: usually the application layer of the project (the biz layer that implements the remote interfaces can also expose interfaces directly, e.g. dubbo or hessian interfaces). It contains the page controller layer and the view layer. There can also be more than one web module, which again shows up in large projects.

mybatis invalid comparison: java.util.Date and java.lang.String exception

1 Error details

mybatis invalid comparison: java.util.Date and java.lang.String

Caused by: java.lang.IllegalArgumentException: invalid comparison: java.util.Date and java.lang.String
 at org.apache.ibatis.ognl.OgnlOps.compareWithConversion(OgnlOps.java:92)
 at org.apache.ibatis.ognl.OgnlOps.isEqual(OgnlOps.java:142)
 at org.apache.ibatis.ognl.OgnlOps.equal(OgnlOps.java:794)
 at org.apache.ibatis.ognl.ASTNotEq.getValueBody(ASTNotEq.java:53)
 at org.apache.ibatis.ognl.SimpleNode.evaluateGetValueBody(SimpleNode.java:212)
 at org.apache.ibatis.ognl.SimpleNode.getValue(SimpleNode.java:258)
 at org.apache.ibatis.ognl.ASTAnd.getValueBody(ASTAnd.java:61)
 at org.apache.ibatis.ognl.SimpleNode.evaluateGetValueBody(SimpleNode.java:212)
 at org.apache.ibatis.ognl.SimpleNode.getValue(SimpleNode.java:258)
 at org.apache.ibatis.ognl.Ognl.getValue(Ognl.java:494)
 at org.apache.ibatis.ognl.Ognl.getValue(Ognl.java:458)
 at org.apache.ibatis.scripting.xmltags.OgnlCache.getValue(OgnlCache.java:44)
 at org.apache.ibatis.scripting.xmltags.ExpressionEvaluator.evaluateBoolean(ExpressionEvaluator.java:32)
 at org.apache.ibatis.scripting.xmltags.IfSqlNode.apply(IfSqlNode.java:34)
 at org.apache.ibatis.scripting.xmltags.MixedSqlNode.apply(MixedSqlNode.java:33)
 at org.apache.ibatis.scripting.xmltags.TrimSqlNode.apply(TrimSqlNode.java:55)
 at org.apache.ibatis.scripting.xmltags.MixedSqlNode.apply(MixedSqlNode.java:33)
 at org.apache.ibatis.scripting.xmltags.DynamicSqlSource.getBoundSql(DynamicSqlSource.java:41)
 at org.apache.ibatis.mapping.MappedStatement.getBoundSql(MappedStatement.java:280)
 at org.apache.ibatis.executor.CachingExecutor.query(CachingExecutor.java:80)
 at org.apache.ibatis.session.defaults.DefaultSqlSession.selectList(DefaultSqlSession.java:120)
 ... 7 more

The cause: in MyBatis 3.4.0 the OGNL expression in the <if> test cannot compare a date with a string, so a java.util.Date parameter compared against the empty string '' throws the exception above. Simply remove the

nextExecuteTime != '' condition (keep only the null check) and the statement works.

hadoop-0.20.2 fs command guide

hehaibolocal:hadoop-0.20.2-tmp hehaibo$ hadoop fs -help

hadoop fs is the command to execute fs commands. The full syntax is: 

hadoop fs [-fs <local | file system URI>] [-conf <configuration file>]

 [-D <property=value>] [-ls <path>] [-lsr <path>] [-du <path>]

 [-dus <path>] [-mv <src> <dst>] [-cp <src> <dst>] [-rm [-skipTrash] <src>]

 [-rmr [-skipTrash] <src>] [-put <localsrc> ... <dst>] [-copyFromLocal <localsrc> ... <dst>]

 [-moveFromLocal <localsrc> ... <dst>] [-get [-ignoreCrc] [-crc] <src> <localdst>

 [-getmerge <src> <localdst> [addnl]] [-cat <src>]

 [-copyToLocal [-ignoreCrc] [-crc] <src> <localdst>] [-moveToLocal <src> <localdst>]

 [-mkdir <path>] [-report] [-setrep [-R] [-w] <rep> <path/file>]

 [-touchz <path>] [-test -[ezd] <path>] [-stat [format] <path>]

 [-tail [-f] <path>] [-text <path>]

 [-chmod [-R] <MODE[,MODE]... | OCTALMODE> PATH...]

 [-chown [-R] [OWNER][:[GROUP]] PATH...]

 [-chgrp [-R] GROUP PATH...]

 [-count[-q] <path>]

 [-help [cmd]]

-fs [local | <file system URI>]:  Specify the file system to use.

  If not specified, the current configuration is used, 

  taken from the following, in increasing precedence: 

   core-default.xml inside the hadoop jar file 

   core-site.xml in $HADOOP_CONF_DIR 

  'local' means use the local file system as your DFS. 

  <file system URI> specifies a particular file system to 

  contact. This argument is optional but if used must appear

  appear first on the command line.  Exactly one additional

  argument must be specified. 

-ls <path>:  List the contents that match the specified file pattern. If

  path is not specified, the contents of /user/<currentUser>

  will be listed. Directory entries are of the form 

   dirName (full path) <dir> 

  and file entries are of the form 

   fileName(full path) <r n> size 

  where n is the number of replicas specified for the file 

  and size is the size of the file, in bytes.

-lsr <path>:  Recursively list the contents that match the specified

  file pattern.  Behaves very similarly to hadoop fs -ls,

  except that the data is shown for all the entries in the

  subtree.

-du <path>:  Show the amount of space, in bytes, used by the files that 

  match the specified file pattern.  Equivalent to the unix

  command "du -sb <path>/*" in case of a directory, 

  and to "du -b <path>" in case of a file.

  The output is in the form 

   name(full path) size (in bytes)

-dus <path>:  Show the amount of space, in bytes, used by the files that 

  match the specified file pattern.  Equivalent to the unix

  command "du -sb"  The output is in the form 

   name(full path) size (in bytes)

-mv <src> <dst>:   Move files that match the specified file pattern <src>

  to a destination <dst>.  When moving multiple files, the 

  destination must be a directory. 

-cp <src> <dst>:   Copy files that match the file pattern <src> to a 

  destination.  When copying multiple files, the destination

  must be a directory. 

-rm [-skipTrash] <src>:  Delete all files that match the specified file pattern.

  Equivalent to the Unix command "rm <src>"

  -skipTrash option bypasses trash, if enabled, and immediately

deletes <src>

-rmr [-skipTrash] <src>:  Remove all directories which match the specified file 

  pattern. Equivalent to the Unix command "rm -rf <src>"

  -skipTrash option bypasses trash, if enabled, and immediately

deletes <src>

-put <localsrc> ... <dst>:  Copy files from the local file system 

  into fs. 

-copyFromLocal <localsrc> ... <dst>: Identical to the -put command.

-moveFromLocal <localsrc> ... <dst>: Same as -put, except that the source is

  deleted after it's copied.

-get [-ignoreCrc] [-crc] <src> <localdst>:  Copy files that match the file pattern <src> 

  to the local name.  <src> is kept.  When copying mutiple, 

  files, the destination must be a directory. 

-getmerge <src> <localdst>:  Get all the files in the directories that 

  match the source file pattern and merge and sort them to only

  one file on local fs. <src> is kept.

-cat <src>:  Fetch all files that match the file pattern <src> 

  and display their content on stdout.

-copyToLocal [-ignoreCrc] [-crc] <src> <localdst>:  Identical to the -get command.

-moveToLocal <src> <localdst>:  Not implemented yet 

-mkdir <path>:  Create a directory in specified location. 

-setrep [-R] [-w] <rep> <path/file>:  Set the replication level of a file. 

  The -R flag requests a recursive change of replication level 

  for an entire tree.

-tail [-f] <file>:  Show the last 1KB of the file. 

  The -f option shows apended data as the file grows. 

-touchz <path>: Write a timestamp in yyyy-MM-dd HH:mm:ss format

  in a file at <path>. An error is returned if the file exists with non-zero length

-test -[ezd] <path>: If file { exists, has zero length, is a directory

  then return 0, else return 1.

-text <src>:  Takes a source file and outputs the file in text format.

  The allowed formats are zip and TextRecordInputStream.

-stat [format] <path>: Print statistics about the file/directory at <path>

  in the specified format. Format accepts filesize in blocks (%b), filename (%n),

  block size (%o), replication (%r), modification date (%y, %Y)

-chmod [-R] <MODE[,MODE]... | OCTALMODE> PATH...

  Changes permissions of a file.

  This works similar to shell's chmod with a few exceptions.

 -R modifies the files recursively. This is the only option

  currently supported.

 MODE Mode is same as mode used for chmod shell command.

  Only letters recognized are 'rwxX'. E.g. a+r,g-w,+rwx,o=r

 OCTALMODE Mode specifed in 3 digits. Unlike shell command,

  this requires all three digits.

  E.g. 754 is same as u=rwx,g=rx,o=r

  If none of 'augo' is specified, 'a' is assumed and unlike

  shell command, no umask is applied.

-chown [-R] [OWNER][:[GROUP]] PATH...

  Changes owner and group of a file.

  This is similar to shell's chown with a few exceptions.

 -R modifies the files recursively. This is the only option

  currently supported.

  If only owner or group is specified then only owner or

  group is modified.

  The owner and group names may only cosists of digits, alphabet,

  and any of '-_.@/' i.e. [-_.@/a-zA-Z0-9]. The names are case

  sensitive.

  WARNING: Avoid using '.' to separate user name and group though

  Linux allows it. If user names have dots in them and you are

  using local file system, you might see surprising results since

  shell command 'chown' is used for local files.

-chgrp [-R] GROUP PATH...

  This is equivalent to -chown ... :GROUP ...

-count[-q] <path>: Count the number of directories, files and bytes under the paths

  that match the specified file pattern.  The output columns are:

  DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME or

  QUOTA REMAINING_QUATA SPACE_QUOTA REMAINING_SPACE_QUOTA 

        DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME

-help [cmd]:  Displays help for given command or all commands if none

  is specified.

hehaibolocal:hadoop-0.20.2-tmp hehaibo$ 

Tomcat process exits unexpectedly - tomcat stopInternal

Recently the Tomcat in our development environment kept dying for no apparent reason.

Aug 04, 2018 10:50:04 AM org.apache.coyote.AbstractProtocol pause
INFO: Pausing ProtocolHandler ["http-bio-18080"]
Aug 04, 2018 10:50:04 AM org.apache.coyote.AbstractProtocol pause
INFO: Pausing ProtocolHandler ["ajp-bio-58080"]
Aug 04, 2018 10:50:04 AM org.apache.catalina.core.StandardService stopInternal
INFO: Stopping service Catalina
20180804:10:50:04.547 [localhost-startStop-2] [org.springframework.context.support.AbstractApplicationContext-982] INFO Closing WebApplicationContext for namespace 'optimus-servlet': startup date [Sat Aug 04 10:07:59 CST 2018]; parent: Root WebApplicationContext
Aug 04, 2018 10:50:05 AM org.apache.catalina.loader.WebappClassLoaderBase clearReferencesJdbc
SEVERE: The web application [] registered the JDBC driver [com.alibaba.druid.proxy.DruidDriver] but failed to unregister it when the web application was stopped. To prevent a memory leak, the JDBC Driver has been forcibly unregistered.
Aug 04, 2018 10:50:05 AM org.apache.catalina.loader.WebappClassLoaderBase clearReferencesJdbc
SEVERE: The web application [] registered the JDBC driver [com.mysql.jdbc.Driver] but failed to unregister it when the web application was stopped. To prevent a memory leak, the JDBC Driver has been forcibly unregistered.
Aug 04, 2018 10:50:05 AM org.apache.catalina.loader.WebappClassLoaderBase clearReferencesThreads

Aug 04, 2018 10:50:05 AM org.apache.coyote.AbstractProtocol stop
INFO: Stopping ProtocolHandler ["http-bio-18080"]
Aug 04, 2018 10:50:05 AM org.apache.coyote.AbstractProtocol stop
INFO: Stopping ProtocolHandler ["ajp-bio-58080"]
Aug 04, 2018 10:50:05 AM org.apache.coyote.AbstractProtocol destroy
INFO: Destroying ProtocolHandler ["http-bio-18080"]
Aug 04, 2018 10:50:05 AM org.apache.coyote.AbstractProtocol destroy
INFO: Destroying ProtocolHandler ["ajp-bio-58080"]

A Google search turned up the answer:

Reposted from the Concurrent Programming Network (ifeve.com), article: Tomcat进程意外退出的问题分析 (an analysis of unexpected Tomcat process exits)

See also:

tomcat server组件监听shutdown命令关闭服务器之源码分析 (source-level analysis of how the Tomcat Server component listens for the shutdown command)

https://blog.csdn.net/joenqc/article/details/75212775

The script that starts Tomcat:

Following the hint from the ifeve.com article, adding

# enable job-control (monitor) mode
set -m

to the script solved the problem completely. With monitor mode on, jobs started by the script run in their own process groups, so the Tomcat JVM no longer receives signals that are delivered to the script's process group when the launching shell goes away.

#!/usr/bin/env sh
# arg1: branch, arg2: configuration profile
# enable job-control (monitor) mode
set -m
p1=18080;
p2=$1;
p3=$2;
if [ $# -lt 1 ]; then
 p2=develop
fi
if [ $# -lt 2 ]; then
 p3=xjf221
fi
sysname=test
cd `dirname $0`
cur_dir_temp=`pwd`;
cur_dir=$(dirname ${cur_dir_temp})
echo ${cur_dir}

git reset --hard
git clean -xdf
git checkout ${p2};
git pull;

cd ${cur_dir}

# configure Tomcat
cd `pwd`/script && sh setup_tomcat.sh ${sysname} ${p1};
cd ${cur_dir}

mvn clean install -DskipTests

ps auxwww | grep java | grep ${p1} | grep ${sysname} | awk '{print $2}' | xargs kill -9 2>/dev/null;
~/tomcat/tomcat_${sysname}_${p1}/bin/shutdown.sh ;
rm -rf ~/tomcat/tomcat_${sysname}_${p1}/webapps/ROOT.war;
rm -rf ~/tomcat/tomcat_${sysname}_${p1}/webapps/ROOT;
unzip ${cur_dir}/target/ROOT.war -d ~/tomcat/tomcat_${sysname}_${p1}/webapps/ROOT;
rm -rf catalina.pid 2>/dev/null;
~/tomcat/tomcat_${sysname}_${p1}/bin/startup.sh;

cd ~/tomcat/tomcat_${sysname}_${p1}/logs
tailf catalina.out

MySQL: creating a user and changing a non-root user's password

1 Create a user

create user 'test'@'%' identified by '123456';

flush privileges;

2 Grant the user privileges on a database

grant all privileges on `testdb`.* to 'test'@'%' identified by '123456';
flush privileges;
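
A quick way to verify the new account and grant from Java (a sketch; it assumes the MySQL Connector/J driver is on the classpath and the server is listening locally on the default port 3306):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TestUserCheck {
    public static void main(String[] args) throws Exception {
        // connect as the newly created 'test' user to the granted testdb database
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/testdb", "test", "123456");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select current_user()")) {
            while (rs.next()) {
                System.out.println("connected as: " + rs.getString(1));
            }
        }
    }
}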

3 Change a (non-root) user's password

https://dev.mysql.com/doc/refman/5.7/en/resetting-permissions.html

1 MySQL 5.7.6 and later:

ALTER USER 'root'@'localhost' IDENTIFIED BY 'MyNewPass';

2 MySQL 5.7.5 and earlier:

SET PASSWORD FOR 'root'@'localhost' = PASSWORD('MyNewPass');

Installing hadoop-0.20.2 as a single-node environment on a Mac

1 Install the Java environment

Omitted…

hehaibolocal:~ hehaibo$ java -version

java version "1.8.0_91"

Java(TM) SE Runtime Environment (build 1.8.0_91-b14)

Java HotSpot(TM) 64-Bit Server VM (build 25.91-b14, mixed mode)

hehaibolocal:~ hehaibo$ 

2 Install Hadoop

Download https://archive.apache.org/dist/hadoop/common/hadoop-0.20.2/hadoop-0.20.2.tar.gz to /Users/hehaibo/hadoop/

Run:

hehaibolocal:hadoop hehaibo$ tar xvf hadoop-0.20.2.tar.gz

After extraction the directory is /Users/hehaibo/hadoop/hadoop-0.20.2

 

3 Configure the Hadoop environment variables

sudo vi /etc/profile

Add:

export HADOOP_HOME=/Users/hehaibo/hadoop/hadoop-0.20.2

export PATH=".:$PATH:/usr/local/bin:$JAVA_HOME/bin:$ANT_HOME/bin:$MAVEN_HOME/bin:$HADOOP_HOME/bin"

4 Verify the installed version

hehaibolocal:hadoop-0.20.2 hehaibo$ hadoop version

Hadoop 0.20.2

Subversion https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20 -r 911707

Compiled by chrisdo on Fri Feb 19 08:07:34 UTC 2010

hehaibolocal:hadoop-0.20.2 hehaibo$ 

5 Configure Hadoop

5.1 Configure conf/core-site.xml

<?xml version="1.0"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>

<property>

<!-- The host where the namenode runs; the port is 9000 -->

<name>fs.default.name</name>

<value>hdfs://localhost:9000/</value>

</property>

</configuration>

5.2 Configure conf/hdfs-site.xml

<?xml version="1.0"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>

<!-- Directories where HDFS stores its data blocks; multiple comma-separated paths may be given -->

<property>

<name>dfs.data.dir</name>

<value>/Users/hehaibo/hadoop/hadoop-0.20.2-tmp/hadoop-data</value>

</property>

<!-- Directories where HDFS stores its filesystem metadata; multiple comma-separated paths may be given -->

<property>

<name>dfs.name.dir</name>

<value>/Users/hehaibo/hadoop/hadoop-0.20.2-tmp/hadoop-name</value>

</property>

<property>

<!-- Number of block replicas; the default is 3. With fewer than 3 slave nodes, set it to 1 or 2 accordingly -->

<name>dfs.replication</name>

<value>1</value>

</property>

</configuration>

5.3 Configure conf/mapred-site.xml

<?xml version="1.0"?>

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>

<property>

<!-- The host and port where the jobtracker runs (here localhost:8021) -->

<name>mapred.job.tracker</name>

<value>localhost:8021</value>

</property>

</configuration>

6 Configure passwordless SSH login

% sudo apt-get install ssh

% ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa 

% cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

-- passwordless login works
hehaibolocal:~ hehaibo$ ssh localhost

Last login: Thu Jul 19 16:30:48 2018

hehaibolocal:~ hehaibo$ 


7 Add the Java environment variable to conf/hadoop-env.sh

export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/

8 Format the HDFS filesystem

% hadoop namenode -format


hehaibolocal:~ hehaibo$ hadoop namenode -format

18/07/19 16:50:25 INFO namenode.NameNode: STARTUP_MSG: 

/************************************************************

STARTUP_MSG: Starting NameNode

STARTUP_MSG:   host = hehaibolocal.local/172.17.11.24

STARTUP_MSG:   args = [-format]

STARTUP_MSG:   version = 0.20.2

STARTUP_MSG:   build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20 -r 911707; compiled by 'chrisdo' on Fri Feb 19 08:07:34 UTC 2010

************************************************************/

18/07/19 16:50:26 INFO namenode.FSNamesystem: fsOwner=hehaibo,staff,access_bpf,everyone,localaccounts,_appserverusr,admin,_appserveradm,_lpadmin,_appstore,_lpoperator,_developer,_analyticsusers,com.apple.access_ftp,com.apple.access_screensharing,com.apple.access_ssh-disabled

18/07/19 16:50:26 INFO namenode.FSNamesystem: supergroup=supergroup

18/07/19 16:50:26 INFO namenode.FSNamesystem: isPermissionEnabled=true

18/07/19 16:50:26 INFO common.Storage: Image file of size 97 saved in 0 seconds.

18/07/19 16:50:26 INFO common.Storage: Storage directory /Users/hehaibo/hadoop/hadoop-0.20.2-tmp/hadoop-name has been successfully formatted.

18/07/19 16:50:26 INFO namenode.NameNode: SHUTDOWN_MSG: 

/************************************************************

SHUTDOWN_MSG: Shutting down NameNode at hehaibolocal.local/172.17.11.24

************************************************************/

9 Start Hadoop

9.1 Start

hehaibolocal:~ hehaibo$ start-dfs.sh

namenode running as process 5375. Stop it first.

localhost: starting datanode, logging to /Users/hehaibo/hadoop/hadoop-0.20.2/bin/../logs/hadoop-hehaibo-datanode-hehaibolocal.local.out

localhost: starting secondarynamenode, logging to /Users/hehaibo/hadoop/hadoop-0.20.2/bin/../logs/hadoop-hehaibo-secondarynamenode-hehaibolocal.local.out

hehaibolocal:~ hehaibo$ start-mapred.sh 

starting jobtracker, logging to /Users/hehaibo/hadoop/hadoop-0.20.2/bin/../logs/hadoop-hehaibo-jobtracker-hehaibolocal.local.out

localhost: starting tasktracker, logging to /Users/hehaibo/hadoop/hadoop-0.20.2/bin/../logs/hadoop-hehaibo-tasktracker-hehaibolocal.local.out

9.2 Check the running processes

hehaibolocal:~ hehaibo$ jps

5603 DataNode

5669 SecondaryNameNode

5770 TaskTracker

5710 JobTracker

5375 NameNode

9.3 Access from a browser:

http://localhost:50070/dfshealth.jsp

http://localhost:50030/jobtracker.jsp
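
As an extra sanity check beyond the web UIs, a small client written against the hadoop-0.20.2 jar can talk to the single-node HDFS while the daemons are running (a sketch; the file path is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsSmokeTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // must match fs.default.name in conf/core-site.xml
        conf.set("fs.default.name", "hdfs://localhost:9000/");

        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/tmp/hello.txt");   // illustrative path
        FSDataOutputStream out = fs.create(file, true);
        out.writeBytes("hello hdfs\n");
        out.close();

        System.out.println("exists: " + fs.exists(file));
        fs.close();
    }
}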

10 Stop the Hadoop services

hehaibolocal:~ hehaibo$ stop-dfs.sh 

stopping namenode

localhost: stopping datanode

localhost: stopping secondarynamenode

hehaibolocal:~ hehaibo$ stop-mapred.sh 

stopping jobtracker

localhost: stopping tasktracker

hehaibolocal:~ hehaibo$