toMap();
+
/**
* load current object from DB
*/
@@ -104,7 +116,6 @@ default boolean saveToFile(String basePath) throws ExceptionDBGit {
return res;
} catch (Exception e) {
- e.printStackTrace();
throw new ExceptionDBGit(e);
}
}
@@ -124,31 +135,41 @@ default IMetaObject loadFromFile(String basePath) throws Exception {
String filename = DBGitPath.getFullPath(basePath);
File file = new File(filename+"/"+getFileName());
FileInputStream fis = new FileInputStream(file);
- IMetaObject meta = this.deSerialize(fis);
+ IMetaObject meta;
+ if (!file.getPath().endsWith(".csv"))
+ meta = this.deSerialize(fis);
+ else
+ meta = this.deSerialize(file);
fis.close();
if (meta != null && meta.getName().isEmpty()) {
meta.setName(this.getName());
}
-
+
return meta;
}
-
+
default IMetaObject loadFromFile() throws Exception {
return loadFromFile(null);
}
-
+
public int addToGit() throws ExceptionDBGit;
-
+
public int removeFromGit() throws ExceptionDBGit;
default DBSchemaObject getUnderlyingDbObject(){
//All in one place
if(this instanceof MetaSql) return ((MetaSql) this).getSqlObject();
+ if(this instanceof MetaSequence) return ((MetaSequence) this).getSequence();
if(this instanceof MetaTable) return ((MetaTable) this).getTable();
return null;
}
+ default boolean dependsOn(IMetaObject obj){
+ if (this.getUnderlyingDbObject() == null || this.getUnderlyingDbObject().getDependencies() == null) return false;
+ return this.getUnderlyingDbObject().getDependencies().contains(obj.getName());
+ }
+
static IMetaObject create(String name) throws ExceptionDBGit {
NameMeta nm = new NameMeta(name);
if (nm.getType() == null) throw new ExceptionDBGit(DBGitLang.getInstance().getValue("errors", "meta", "parseError").withParams(name));
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaBase.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaBase.java
index 03d33e3..30d10cb 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaBase.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaBase.java
@@ -1,137 +1,164 @@
-package ru.fusionsoft.dbgit.meta;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.charset.Charset;
-
-import org.yaml.snakeyaml.DumperOptions;
-import org.yaml.snakeyaml.Yaml;
-import org.yaml.snakeyaml.DumperOptions.ScalarStyle;
-import org.yaml.snakeyaml.nodes.Tag;
-
-import ru.fusionsoft.dbgit.adapters.AdapterFactory;
-import ru.fusionsoft.dbgit.core.DBGit;
-import ru.fusionsoft.dbgit.core.DBGitPath;
-import ru.fusionsoft.dbgit.core.ExceptionDBGit;
-import ru.fusionsoft.dbgit.core.ExceptionDBGitRunTime;
-import ru.fusionsoft.dbgit.core.db.DbType;
-import ru.fusionsoft.dbgit.utils.ConsoleWriter;
-import ru.fusionsoft.dbgit.yaml.DBGitYamlConstructor;
-import ru.fusionsoft.dbgit.yaml.DBGitYamlRepresenter;
-import ru.fusionsoft.dbgit.yaml.YamlOrder;
-
-
-/**
- * Base class for all meta objects
- * @author mikle
- *
- */
-public abstract class MetaBase implements IMetaObject {
- @YamlOrder(0)
- protected String name;
-
- @YamlOrder(1)
- protected DbType dbType;
-
- @YamlOrder(1)
- protected String dbVersion;
-
- @Override
- public String getName() {
- return name;
- }
-
- @Override
- public void setDbType(DbType dbType) {
- this.dbType = dbType;
- }
-
- @Override
- public DbType getDbType() {
- return dbType;
- }
-
- @Override
- public void setDbVersion(String dbVersion) {
- this.dbVersion = dbVersion;
- }
-
- @Override
- public String getDbVersion() {
- return dbVersion;
- }
-
- @Override
- public void setName(String name) throws ExceptionDBGit {
- this.name = name;
- }
-
- @Override
- public String getFileName() {
- return getName();
- }
-
- /**
- * When you save the yaml object, the library ignores properties for which there is no getter and setter
- * При сохранении объекта yaml библиотека игнорирует свойства для которых нет геттера и сеттера
- * @param stream
- * @throws IOException
- */
- public boolean yamlSerialize(OutputStream stream) throws IOException {
- Yaml yaml = createYaml();
- String output = yaml.dumpAs(this, Tag.MAP, DumperOptions.FlowStyle.BLOCK);
-
- stream.write(output.getBytes(Charset.forName("UTF-8")));
- return true;
- }
-
- public IMetaObject yamlDeSerialize(InputStream stream) {
- Yaml yaml = createYaml();
-
- IMetaObject meta = yaml.loadAs(stream, this.getClass());
- return meta;
- }
-
- public Yaml createYaml() {
- DumperOptions options = new DumperOptions();
- options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
- options.setPrettyFlow(true);
- Yaml yaml = new Yaml(new DBGitYamlConstructor(), new DBGitYamlRepresenter(), options);
- return yaml;
- }
-
- @Override
- public int addToGit() throws ExceptionDBGit {
- DBGit dbGit = DBGit.getInstance();
- dbGit.addFileToIndexGit(DBGitPath.DB_GIT_PATH+"/"+getFileName());
- return 1;
- }
-
- @Override
- public int removeFromGit() throws ExceptionDBGit {
- DBGit dbGit = DBGit.getInstance();
- dbGit.removeFileFromIndexGit(DBGitPath.DB_GIT_PATH+"/"+getFileName());
- return 1;
- }
-
- public void setDbType() {
- try {
- setDbType(AdapterFactory.createAdapter().getDbType());
- } catch (ExceptionDBGit e) {
- throw new ExceptionDBGitRunTime(e.getLocalizedMessage());
- }
-
- }
-
- public void setDbVersion() {
- try {
- setDbVersion(AdapterFactory.createAdapter().getDbVersion());
- } catch (ExceptionDBGit e) {
- throw new ExceptionDBGitRunTime(e.getLocalizedMessage());
- }
-
- }
-
-
-}
+package ru.fusionsoft.dbgit.meta;
+
+import java.io.*;
+import java.nio.charset.Charset;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.nodes.Tag;
+
+import ru.fusionsoft.dbgit.adapters.AdapterFactory;
+import ru.fusionsoft.dbgit.core.DBGit;
+import ru.fusionsoft.dbgit.core.DBGitPath;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRunTime;
+import ru.fusionsoft.dbgit.core.db.DbType;
+import ru.fusionsoft.dbgit.utils.StringProperties;
+import ru.fusionsoft.dbgit.yaml.DBGitYamlConstructor;
+import ru.fusionsoft.dbgit.yaml.DBGitYamlRepresenter;
+import ru.fusionsoft.dbgit.yaml.YamlOrder;
+
+
+/**
+ * Base class for all meta objects
+ * @author mikle
+ *
+ */
+public abstract class MetaBase implements IMetaObject {
+ @YamlOrder(0)
+ protected String name;
+
+ @YamlOrder(1)
+ protected DbType dbType;
+
+ @YamlOrder(2)
+ protected String dbVersion;
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public void setDbType(DbType dbType) {
+ this.dbType = dbType;
+ }
+
+ @Override
+ public DbType getDbType() {
+ return dbType;
+ }
+
+ @Override
+ public void setDbVersion(String dbVersion) {
+ this.dbVersion = dbVersion;
+ }
+
+ @Override
+ public String getDbVersion() {
+ return dbVersion;
+ }
+
+ @Override
+ public Double getDbVersionNumber() {
+ Matcher matcher = Pattern.compile("\\D*(\\d+)\\.(\\d+)").matcher(getDbVersion());
+        if (!matcher.find()) return 0.0;
+        Double result = Double.valueOf(matcher.group(1) + "." + matcher.group(2));
+ return result;
+ }
+
+ @Override
+ public void setName(String name) throws ExceptionDBGit {
+ this.name = name;
+ }
+
+ @Override
+ public String getFileName() {
+ return getName();
+ }
+
+ /**
+ * When you save the yaml object, the library ignores properties for which there is no getter and setter
+ * При сохранении объекта yaml библиотека игнорирует свойства для которых нет геттера и сеттера
+ * @param stream
+ * @throws IOException
+ */
+ public boolean yamlSerialize(OutputStream stream) throws IOException {
+ Yaml yaml = createYaml();
+ String output = yaml.dumpAs(this, Tag.MAP, DumperOptions.FlowStyle.BLOCK);
+
+ stream.write(output.getBytes(Charset.forName("UTF-8")));
+ return true;
+ }
+
+ public IMetaObject yamlDeSerialize(InputStream stream) {
+ Yaml yaml = createYaml();
+ //Map some = yaml.loadAs(stream, Map.class);
+ IMetaObject meta = yaml.loadAs(stream, this.getClass());
+ return meta;
+ }
+
+ public Map toMap() {
+ Yaml yaml = createYaml();
+ String output = yaml.dumpAs(this, Tag.MAP, DumperOptions.FlowStyle.BLOCK);
+ Map meta = yaml.loadAs(output, Map.class);
+ return meta;
+ }
+
+ public Yaml createYaml() {
+ DumperOptions options = new DumperOptions();
+ options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
+ options.setPrettyFlow(true);
+
+ Yaml yaml = new Yaml(new DBGitYamlConstructor(), new DBGitYamlRepresenter(), options);
+ return yaml;
+ }
+
+ @Override
+ public int addToGit() throws ExceptionDBGit {
+ DBGit dbGit = DBGit.getInstance();
+ dbGit.addFileToIndexGit(DBGitPath.DB_GIT_PATH+"/"+getFileName());
+ return 1;
+ }
+
+ @Override
+ public int removeFromGit() throws ExceptionDBGit {
+ DBGit dbGit = DBGit.getInstance();
+ dbGit.removeFileFromIndexGit(DBGitPath.DB_GIT_PATH+"/"+getFileName());
+ return 1;
+ }
+
+ public void setDbType() {
+ try {
+ setDbType(AdapterFactory.createAdapter().getDbType());
+ } catch (ExceptionDBGit e) {
+ throw new ExceptionDBGitRunTime(e.getLocalizedMessage());
+ }
+
+ }
+
+ public void setDbVersion() {
+ try {
+ setDbVersion(AdapterFactory.createAdapter().getDbVersion());
+ } catch (ExceptionDBGit e) {
+ throw new ExceptionDBGitRunTime(e.getLocalizedMessage());
+ }
+
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (!(o instanceof MetaBase)) return false;
+ MetaBase metaBase = (MetaBase) o;
+ return getHash().equals(metaBase.getHash());
+ }
+
+ @Override
+ public int hashCode() {
+ return getHash().hashCode();
+ }
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaDomain.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaDomain.java
new file mode 100644
index 0000000..a7aa969
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaDomain.java
@@ -0,0 +1,40 @@
+package ru.fusionsoft.dbgit.meta;
+
+import ru.fusionsoft.dbgit.adapters.AdapterFactory;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
+import ru.fusionsoft.dbgit.dbobjects.DBDomain;
+import ru.fusionsoft.dbgit.dbobjects.DBSQLObject;
+
+public class MetaDomain extends MetaSql {
+ public MetaDomain() {
+ }
+
+ public MetaDomain(DBSQLObject sqlObject) throws ExceptionDBGit {
+ super(sqlObject);
+ }
+
+ /**
+ * @return Type meta object
+ */
+ @Override
+ public final IDBGitMetaType getType() {
+ return DBGitMetaType.DBGitDomain;
+ }
+
+ /**
+ * load current object from DB
+ */
+ @Override
+ public final boolean loadFromDB() throws ExceptionDBGit {
+ final IDBAdapter adapter = AdapterFactory.createAdapter();
+ final NameMeta nm = MetaObjectFactory.parseMetaName(name);
+ final DBDomain dbObject = adapter.getDomain(nm.getSchema(), nm.getName());
+
+ if (dbObject != null) {
+ setSqlObject(dbObject);
+ return true;
+ } else
+ return false;
+ }
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaEnum.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaEnum.java
new file mode 100644
index 0000000..fd41f51
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaEnum.java
@@ -0,0 +1,32 @@
+package ru.fusionsoft.dbgit.meta;
+
+import ru.fusionsoft.dbgit.adapters.AdapterFactory;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
+import ru.fusionsoft.dbgit.dbobjects.DBEnum;
+
+public class MetaEnum extends MetaSql {
+ /**
+ * @return Type meta object
+ */
+ @Override
+ public IDBGitMetaType getType() {
+ return DBGitMetaType.DBGitEnum;
+ }
+
+ /**
+ * load current object from DB
+ */
+ @Override
+ public boolean loadFromDB() throws ExceptionDBGit {
+ final IDBAdapter adapter = AdapterFactory.createAdapter();
+ final NameMeta nm = MetaObjectFactory.parseMetaName(name);
+ final DBEnum dbObject = adapter.getEnum(nm.getSchema(), nm.getName());
+
+ if (dbObject != null) {
+ setSqlObject(dbObject);
+ return true;
+ } else
+ return false;
+ }
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaFunction.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaFunction.java
index 9823362..09d6a0e 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaFunction.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaFunction.java
@@ -11,7 +11,7 @@ public class MetaFunction extends MetaSql {
public MetaFunction() {
super();
}
-
+
public MetaFunction(DBFunction fun) throws ExceptionDBGit {
super(fun);
}
@@ -21,14 +21,12 @@ public DBGitMetaType getType() {
return DBGitMetaType.DbGitFunction;
}
- @Override
- public String getName() {
- return name;
- }
-
@Override
public String getFileName(){
String res = name.replace(".fnc", "");
+ String schemaName = "";
+ if (res.contains("/"))
+ schemaName = res.substring(0, res.indexOf("/"));
if (this.getSqlObject() != null && this.getSqlObject().getOptions() != null && this.getSqlObject().getOptions().get("arguments") != null)
res = res + "_" + this.getSqlObject().getOptions().get("arguments").getData()
@@ -41,7 +39,7 @@ public String getFileName(){
.replace("::", "");
if (res.endsWith("_")) res = res.substring(0, res.length() - 1);
- if (res.length() > MAX_FILE_NAME_LENGTH) {
+ if (res.length() > (schemaName.length() + 1 + MAX_FILE_NAME_LENGTH)) {
String resTemp = res.substring(0, MAX_FILE_NAME_LENGTH);
int resInt = res.length() - MAX_FILE_NAME_LENGTH;
res = resTemp + "_" + resInt;
@@ -51,7 +49,7 @@ public String getFileName(){
return res;
}
-
+
@Override
public boolean loadFromDB() throws ExceptionDBGit {
IDBAdapter adapter = AdapterFactory.createAdapter();
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjOptions.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjOptions.java
index 8063e44..d4f5d8b 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjOptions.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjOptions.java
@@ -4,15 +4,14 @@
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
+import java.util.Objects;
-import ru.fusionsoft.dbgit.adapters.AdapterFactory;
-import ru.fusionsoft.dbgit.adapters.IDBAdapter;
import ru.fusionsoft.dbgit.core.DBGitLang;
import ru.fusionsoft.dbgit.core.ExceptionDBGit;
import ru.fusionsoft.dbgit.core.ExceptionDBGitObjectNotFound;
import ru.fusionsoft.dbgit.dbobjects.DBOptionsObject;
-import ru.fusionsoft.dbgit.dbobjects.DBUser;
import ru.fusionsoft.dbgit.utils.CalcHash;
+import ru.fusionsoft.dbgit.yaml.YamlOrder;
/**
* Base Meta class for data use DBOptionsObject information. This data is tree string properties.
@@ -21,6 +20,7 @@
*/
public abstract class MetaObjOptions extends MetaBase {
+ @YamlOrder(4)
private DBOptionsObject objectOption = null;
public MetaObjOptions() {
@@ -48,7 +48,7 @@ public boolean serialize(OutputStream stream) throws IOException {
}
@Override
- public IMetaObject deSerialize(InputStream stream) throws IOException{
+ public IMetaObject deSerialize(InputStream stream) {
return yamlDeSerialize(stream);
}
@@ -75,4 +75,16 @@ public void setObjectOptionFromMap(Map map) t
setObjectOption(map.get(nm.getName()));
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (!(o instanceof MetaObjOptions)) return false;
+ MetaObjOptions that = (MetaObjOptions) o;
+        return getObjectOption() != null && that.getObjectOption() != null && getObjectOption().getHash().equals(that.getObjectOption().getHash());
+ }
+
+ @Override
+ public int hashCode() {
+        return getObjectOption() == null ? 0 : Objects.hashCode(getObjectOption().getHash());
+ }
}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjectFactory.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjectFactory.java
index ca303bd..0beb554 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjectFactory.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaObjectFactory.java
@@ -46,21 +46,18 @@ public static IMetaObject createMetaObject(IDBGitMetaType tp) throws ExceptionDB
}
- public static NameMeta parseMetaName(String name) throws ExceptionDBGit {
- try {
- NameMeta nm = new NameMeta();
-
- Integer pos = name.lastIndexOf("/");
- if (pos > 0) {
- nm.setSchema(name.substring(0, pos));
- }
- Integer posDot = name.lastIndexOf(".");
- nm.setName(name.substring(pos+1, posDot));
- nm.setType(DBGitMetaType.valueByCode(name.substring(posDot + 1)));
+ public static NameMeta parseMetaName(String name) {
+ NameMeta nm = new NameMeta();
- return nm;
- } catch(Exception e) {
- throw new ExceptionDBGitRunTime(DBGitLang.getInstance().getValue("errors", "meta", "parseError").withParams(name), e);
+ Integer pos = name.lastIndexOf("/");
+ if (pos > 0) {
+ nm.setSchema(name.substring(0, pos));
}
+ Integer posDot = name.lastIndexOf(".");
+ nm.setName(name.substring(pos+1, posDot));
+ nm.setType(DBGitMetaType.valueByCode(name.substring(posDot + 1)));
+
+ return nm;
+
}
}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaProcedure.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaProcedure.java
index 7335917..46a8a61 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaProcedure.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaProcedure.java
@@ -29,6 +29,9 @@ public String getName() {
@Override
public String getFileName(){
String res = name.replace(".prc", "");
+ String schemaName = "";
+ if (res.contains("/"))
+ schemaName = res.substring(0, res.indexOf("/"));
if (this.getSqlObject() != null && this.getSqlObject().getOptions() != null && this.getSqlObject().getOptions().get("arguments") != null)
res = res + "_" + this.getSqlObject().getOptions().get("arguments").getData()
@@ -41,7 +44,7 @@ public String getFileName(){
.replace("::", "");
if (res.endsWith("_")) res = res.substring(0, res.length() - 1);
- if (res.length() > MAX_FILE_NAME_LENGTH) {
+ if (res.length() > (schemaName.length() + 1 + MAX_FILE_NAME_LENGTH)) {
String resTemp = res.substring(0, MAX_FILE_NAME_LENGTH);
int resInt = res.length() - MAX_FILE_NAME_LENGTH;
res = resTemp + "_" + resInt;
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaSql.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaSql.java
index 78667f9..7319170 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaSql.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaSql.java
@@ -1,18 +1,10 @@
package ru.fusionsoft.dbgit.meta;
+import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.io.StringWriter;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.util.Map;
-import org.apache.commons.io.IOUtils;
-
-import ru.fusionsoft.dbgit.core.DBGitLang;
import ru.fusionsoft.dbgit.core.ExceptionDBGit;
-import ru.fusionsoft.dbgit.core.ExceptionDBGitObjectNotFound;
-import ru.fusionsoft.dbgit.dbobjects.DBOptionsObject;
import ru.fusionsoft.dbgit.dbobjects.DBSQLObject;
/**
@@ -21,19 +13,18 @@
*
*/
public abstract class MetaSql extends MetaBase {
-
-
+
protected DBSQLObject sqlObject;
public MetaSql() {
setDbType();
setDbVersion();
}
-
+
public MetaSql(DBSQLObject sqlObject) throws ExceptionDBGit {
this();
setSqlObject(sqlObject);
- }
-
+ }
+
public DBSQLObject getSqlObject() {
return sqlObject;
}
@@ -45,7 +36,7 @@ public void setSqlObject(DBSQLObject sqlObject) throws ExceptionDBGit {
}
@Override
- public boolean serialize(OutputStream stream) throws Exception {
+ public boolean serialize(OutputStream stream) throws IOException {
/*
String owner = "owner: "+getSqlObject().getOwner()+"\n";
stream.write(owner.getBytes(Charset.forName("UTF-8")));
@@ -59,7 +50,7 @@ public boolean serialize(OutputStream stream) throws Exception {
}
@Override
- public IMetaObject deSerialize(InputStream stream) throws Exception {
+ public IMetaObject deSerialize(InputStream stream) {
NameMeta nm = MetaObjectFactory.parseMetaName(getName());
/*
sqlObject = new DBSQLObject();
@@ -83,12 +74,13 @@ public IMetaObject deSerialize(InputStream stream) throws Exception {
public String getHash() {
return sqlObject != null ? sqlObject.getHash() : EMPTY_HASH;
}
- public void setObjectOptionFromMap(Map map) throws ExceptionDBGit {
- NameMeta nm = MetaObjectFactory.parseMetaName(getName());
- if (!map.containsKey(nm.getName())) {
- throw new ExceptionDBGitObjectNotFound(DBGitLang.getInstance().getValue("errors", "meta", "notFound").withParams(getName()));
- }
- setSqlObject(map.get(nm.getName()));
- }
+
+// public void setSqlObjectFromMap(Map map) throws ExceptionDBGit {
+// NameMeta nm = MetaObjectFactory.parseMetaName(getName());
+// if (!map.containsKey(nm.getName())) {
+// throw new ExceptionDBGitObjectNotFound(DBGitLang.getInstance().getValue("errors", "meta", "notFound").withParams(getName()));
+// }
+// setSqlObject(map.get(nm.getName()));
+// }
}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaTable.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaTable.java
index 96bbcb1..927a338 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaTable.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaTable.java
@@ -11,55 +11,49 @@
import ru.fusionsoft.dbgit.adapters.IDBAdapter;
import ru.fusionsoft.dbgit.core.DBGitIndex;
import ru.fusionsoft.dbgit.core.ExceptionDBGit;
-import ru.fusionsoft.dbgit.core.ExceptionDBGitRunTime;
-import ru.fusionsoft.dbgit.core.ItemIndex;
-import ru.fusionsoft.dbgit.dbobjects.DBConstraint;
-import ru.fusionsoft.dbgit.dbobjects.DBIndex;
-import ru.fusionsoft.dbgit.dbobjects.DBSchema;
-import ru.fusionsoft.dbgit.dbobjects.DBTable;
-import ru.fusionsoft.dbgit.dbobjects.DBTableField;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitObjectNotFound;
+import ru.fusionsoft.dbgit.dbobjects.*;
import ru.fusionsoft.dbgit.utils.CalcHash;
-import ru.fusionsoft.dbgit.utils.ConsoleWriter;
import ru.fusionsoft.dbgit.yaml.YamlOrder;
/**
- * Meta class for db Table
+ * Meta class for db Table
* @author mikle
*
*/
-public class MetaTable extends MetaBase {
+public class MetaTable extends MetaBase {
- @YamlOrder(1)
+ @YamlOrder(3)
private DBTable table;
-
- @YamlOrder(2)
+
+ @YamlOrder(4)
//private IMapFields fields = new TreeMapFields();
private Map fields = new TreeMap<>();
-
- @YamlOrder(3)
+
+ @YamlOrder(5)
private Map indexes = new TreeMap<>();
-
- @YamlOrder(4)
+
+ @YamlOrder(6)
private Map constraints = new TreeMap<>();
- public MetaTable() {
+ public MetaTable() {
setDbType();
setDbVersion();
}
-
+
public MetaTable(String namePath) {
setDbType();
setDbVersion();
this.name = namePath;
}
-
+
public MetaTable(DBTable tbl) {
setDbType();
setDbVersion();
setTable(tbl);
}
-
+
@Override
public DBGitMetaType getType() {
return DBGitMetaType.DBGitTable;
@@ -71,7 +65,7 @@ public boolean serialize(OutputStream stream) throws IOException {
}
@Override
- public IMetaObject deSerialize(InputStream stream) throws IOException {
+ public IMetaObject deSerialize(InputStream stream) {
return yamlDeSerialize(stream);
}
@@ -79,12 +73,15 @@ public IMetaObject deSerialize(InputStream stream) throws IOException {
public boolean loadFromDB() throws ExceptionDBGit {
IDBAdapter adapter = AdapterFactory.createAdapter();
NameMeta nm = MetaObjectFactory.parseMetaName(getName());
-
- DBTable tbl = adapter.getTable(nm.getSchema(), nm.getName());
- if (tbl != null)
- return loadFromDB(tbl);
- else
+ try {
+ DBTable tbl = adapter.getTable(nm.getSchema(), nm.getName());
+ if (tbl != null)
+ return loadFromDB(tbl);
+ else
+ return false;
+ } catch (ExceptionDBGitObjectNotFound exnf) {
return false;
+ }
}
public boolean loadFromDB(DBTable tbl) throws ExceptionDBGit {
@@ -126,7 +123,13 @@ public boolean loadFromDB(DBTable tbl) throws ExceptionDBGit {
@Override
public String getHash() {
- CalcHash ch = new CalcHash();
+ CalcHash ch = new CalcHash()/*{
+ @Override
+ public CalcHash addData(String str){
+ ConsoleWriter.printlnRed(str);
+ return super.addData(str);
+ }
+ }*/;
ch.addData(this.getName());
if (getTable() != null) {
@@ -142,20 +145,17 @@ public String getHash() {
}
- if (indexes != null) {
- for (String item : indexes.keySet()) {
- ch.addData(item);
- ch.addData(indexes.get(item).getHash());
+ for (String item : indexes.keySet()) {
+ if(constraints.containsKey(item)) continue;
+ ch.addData(item);
+ ch.addData(indexes.get(item).getHash());
- }
}
-
- if (constraints != null) {
- for (String item : constraints.keySet()) {
- ch.addData(item);
- ch.addData(constraints.get(item).getHash());
- }
+ for (String item : constraints.keySet()) {
+ ch.addData(item);
+ ch.addData(constraints.get(item).getHash());
+
}
return ch.calcHashStr();
@@ -213,15 +213,27 @@ public void setConstraints(Map constraints) {
this.constraints.putAll(constraints);
}
- public List getIdColumns() {
- List idColumns = new ArrayList<>();
-
+ public List getIdColumns() {
+ List idColumns = new ArrayList<>();
+
+ int i = 0;
for (DBTableField field : fields.values()) {
if (field.getIsPrimaryKey()) {
- idColumns.add(field.getName());
+ //idColumns.add(field.getName());
+ idColumns.add(i);
}
+ i++;
}
return idColumns;
}
+// private String truncateHash(String hash){
+// return hash.substring(
+// 0,
+// 2
+// ) + hash.substring(
+// hash.length() - 3,
+// hash.length() - 1
+// );
+// }
}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaTableData.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaTableData.java
index 34f4400..fa3042d 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/MetaTableData.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaTableData.java
@@ -1,374 +1,453 @@
-package ru.fusionsoft.dbgit.meta;
-
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.nio.charset.Charset;
-import java.sql.ResultSet;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.csv.CSVFormat;
-import org.apache.commons.csv.CSVParser;
-import org.apache.commons.csv.CSVPrinter;
-import org.apache.commons.csv.CSVRecord;
-import org.apache.commons.csv.QuoteMode;
-
-import com.diogonunes.jcdp.color.api.Ansi.FColor;
-
-import ru.fusionsoft.dbgit.adapters.AdapterFactory;
-import ru.fusionsoft.dbgit.adapters.IDBAdapter;
-import ru.fusionsoft.dbgit.core.DBGit;
-import ru.fusionsoft.dbgit.core.DBGitConfig;
-import ru.fusionsoft.dbgit.core.DBGitLang;
-import ru.fusionsoft.dbgit.core.DBGitPath;
-import ru.fusionsoft.dbgit.core.ExceptionDBGit;
-import ru.fusionsoft.dbgit.core.ExceptionDBGitRunTime;
-import ru.fusionsoft.dbgit.core.GitMetaDataManager;
-import ru.fusionsoft.dbgit.data_table.ICellData;
-import ru.fusionsoft.dbgit.data_table.RowData;
-import ru.fusionsoft.dbgit.data_table.TreeMapRowData;
-import ru.fusionsoft.dbgit.dbobjects.DBTable;
-import ru.fusionsoft.dbgit.dbobjects.DBTableData;
-import ru.fusionsoft.dbgit.utils.CalcHash;
-import ru.fusionsoft.dbgit.utils.ConsoleWriter;
-
-/**
- * Meta class for Table data
- * @author mikle
- *
- */
-public class MetaTableData extends MetaBase {
- protected DBTable table = null;
- private DBTableData dataTable = null;
-
- private TreeMapRowData mapRows = null;
-
- public MetaTableData() {
- setDbType();
- setDbVersion();
- }
-
- public MetaTableData(DBTable tbl) throws ExceptionDBGit {
- setDbType();
- setDbVersion();
- setTable(tbl);
- }
-
-
- public DBTable getTable() {
- return table;
- }
-
- public TreeMap getmapRows() {
- return mapRows;
- }
-
- public DBTableData getDataTable() {
- return dataTable;
- }
-
- public void setMapRows(TreeMapRowData mapRows) {
- this.mapRows = mapRows;
- }
-
- public void setDataTable(DBTableData dataTable) {
- this.dataTable = dataTable;
- }
-
- public void setTable(DBTable table) throws ExceptionDBGit {
- this.table = table;
- setName(table.getSchema()+"/"+table.getName()+"."+getType().getValue());
- }
-
-
-
- @Override
- public void setName(String name) throws ExceptionDBGit {
- if (table == null) {
- NameMeta nm = MetaObjectFactory.parseMetaName(name);
- table = new DBTable();
- table.setSchema(nm.getSchema());
- table.setName(nm.getName());
- }
-
- super.setName(name);
- }
-
- @Override
- public DBGitMetaType getType() {
- return DBGitMetaType.DbGitTableData;
- }
-
- public CSVFormat getCSVFormat() {
- return CSVFormat.DEFAULT
- //.withRecordSeparator("\n")
- .withDelimiter(';')
- .withNullString("")
- .withQuote('"')
- //.withQuoteMode(QuoteMode.ALL)
- ;
- }
-
- public MetaTable getMetaTable() throws ExceptionDBGit {
- String metaTblName = table.getSchema()+"/"+table.getName()+"."+DBGitMetaType.DBGitTable.getValue();
- GitMetaDataManager gmdm = GitMetaDataManager.getInstance();
-
- IMapMetaObject dbObjs = gmdm.getCacheDBMetaData();
- MetaTable metaTable = (MetaTable) dbObjs.get(metaTblName);
- if (metaTable == null ) {
- metaTable = new MetaTable();
- metaTable.loadFromDB(table);
- }
- return metaTable;
- }
-
- public MetaTable getMetaTableFromFile() throws ExceptionDBGit {
- String metaTblName = table.getSchema()+"/"+table.getName()+"."+DBGitMetaType.DBGitTable.getValue();
- GitMetaDataManager gmdm = GitMetaDataManager.getInstance();
-
- MetaTable metaTable = (MetaTable)gmdm.loadMetaFile(metaTblName);
- if (metaTable != null)
- return metaTable;
-
- return getMetaTable();
- }
-
-
- @Override
- public boolean serialize(OutputStream stream) throws Exception {
- Integer count = 0;
- Set fields = null;
-
- if (mapRows == null) {
- return false;
- }
-
- CSVPrinter csvPrinter = new CSVPrinter(new OutputStreamWriter(stream), getCSVFormat());
-
- for (RowData rd : mapRows.values()) {
- if (count == 0) {
- fields = rd.getData().keySet();
- csvPrinter.printRecord(fields);
- }
-
- rd.saveDataToCsv(csvPrinter, getTable());
-
- count++;
- }
- csvPrinter.close();
- return true;
- }
-
- @Override
- public IMetaObject deSerialize(InputStream stream) throws Exception {
-
- MetaTable metaTable = getMetaTableFromFile();
-
- CSVParser csvParser = new CSVParser(new InputStreamReader(stream), getCSVFormat());
- List csvRecords = csvParser.getRecords();
-
- if (csvRecords.size() > 0) {
- CSVRecord titleColumns = csvRecords.get(0);
-
- mapRows = new TreeMapRowData();
-
- for (int i = 1; i < csvRecords.size(); i++) {
- RowData rd = new RowData(csvRecords.get(i), metaTable, titleColumns);
- mapRows.put(rd);
- }
- }
-
- csvParser.close();
-
- //saveToFile("test");
-
- return this;
- }
-
- public boolean loadPortionFromDB(int currentPortionIndex) throws ExceptionDBGit {
- return loadPortionFromDB(currentPortionIndex, 0);
- }
-
- public boolean loadPortionFromDB(int currentPortionIndex, int tryNumber) throws ExceptionDBGit {
- try {
- IDBAdapter adapter = AdapterFactory.createAdapter();
- MetaTable metaTable = getMetaTable();
- if (metaTable.getFields().size() == 0)
- return false;
-
- dataTable = adapter.getTableDataPortion(table.getSchema(), table.getName(), currentPortionIndex, 0);
-
- ResultSet rs = dataTable.getResultSet();
-
- if (dataTable.getErrorFlag() > 0) {
- ConsoleWriter.printlnColor(DBGitLang.getInstance().getValue("errors", "meta", "tooManyRecords").
- withParams(getName(), String.valueOf(IDBAdapter.MAX_ROW_COUNT_FETCH)), FColor.RED, 0);
- return false;
- }
-
- mapRows = new TreeMapRowData();
-
- while(rs.next()){
- RowData rd = new RowData(rs, metaTable);
- mapRows.put(rd);
- }
- return true;
- } catch (Exception e) {
- e.printStackTrace();
- ConsoleWriter.println(e.getMessage());
- ConsoleWriter.println(e.getLocalizedMessage());
-
- try {
- if (tryNumber <= DBGitConfig.getInstance().getInteger("core", "TRY_COUNT", DBGitConfig.getInstance().getIntegerGlobal("core", "TRY_COUNT", 1000))) {
- try {
- TimeUnit.SECONDS.sleep(DBGitConfig.getInstance().getInteger("core", "TRY_DELAY", DBGitConfig.getInstance().getIntegerGlobal("core", "TRY_DELAY", 1000)));
- } catch (InterruptedException e1) {
- throw new ExceptionDBGitRunTime(e1.getMessage());
- }
- ConsoleWriter.println("Error while getting portion of data, try " + tryNumber);
- return loadPortionFromDB(currentPortionIndex, tryNumber++);
- }
- } catch (Exception e1) {
- // TODO Auto-generated catch block
- e1.printStackTrace();
- }
-
- if (e instanceof ExceptionDBGit)
- throw (ExceptionDBGit)e;
- throw new ExceptionDBGit(e);
- }
- }
-
- @Override
- public boolean loadFromDB() throws ExceptionDBGit {
- try {
- IDBAdapter adapter = AdapterFactory.createAdapter();
-
- MetaTable metaTable = getMetaTable();
-
- if (metaTable.getFields().size() == 0)
- return false;
-
- List idColumns = metaTable.getIdColumns();
-
- dataTable = adapter.getTableData(table.getSchema(), table.getName());
-
- if (dataTable.getErrorFlag() > 0) {
- ConsoleWriter.printlnColor(DBGitLang.getInstance().getValue("errors", "meta", "tooManyRecords").
- withParams(getName(), String.valueOf(IDBAdapter.MAX_ROW_COUNT_FETCH)), FColor.RED, 0);
- return false;
- }
-
- ResultSet rs = dataTable.getResultSet();
-
- mapRows = new TreeMapRowData();
-
- //System.out.println("load from db file "+getName());
- while(rs.next()){
- RowData rd = new RowData(rs, metaTable);
- mapRows.put(rd);
- }
- return true;
- /*
- System.out.println("******************************************");
- System.out.println();
- */
- } catch (Exception e) {
- e.printStackTrace();
- if (e instanceof ExceptionDBGit)
- throw (ExceptionDBGit)e;
- throw new ExceptionDBGit(e);
- }
-
- }
-
- public void diff(MetaTableData ob) throws Exception {
- if (mapRows.size() != ob.mapRows.size()) {
- System.out.println(DBGitLang.getInstance().getValue("general", "meta", "diffSize1").withParams(String.valueOf(mapRows.size()), String.valueOf(ob.mapRows.size())));
- }
- for (String rowHash : mapRows.keySet()) {
- RowData r1 = mapRows.get(rowHash);
- RowData r2 = ob.mapRows.get(rowHash);
-
- System.out.println(rowHash);
- System.out.println(r1.getData()+ " "+ r2.getData());
-
- if (r1.getData().size() != r2.getData().size()) {
- System.out.println(DBGitLang.getInstance().getValue("general", "meta", "diffSize2").withParams(rowHash));
- }
-
- for (String col : r1.getData().keySet()) {
- String d1 = r1.getData().get(col).convertToString();
- String d2 = r2.getData().get(col).convertToString();
-
- if (d1 != d2) {
- if (!d1.equals(r2.getData().get(col))) {
- System.out.println(DBGitLang.getInstance().getValue("general", "meta", "diffDataRow").
- withParams(rowHash, col, r1.getData().get(col).toString(), r2.getData().get(col).toString()));
- }
- }
- }
- }
- }
-
-
- @Override
- public String getHash() {
- CalcHash ch = new CalcHash();
- if (mapRows == null)
- return EMPTY_HASH;
-
- if (mapRows.size() == 0)
- return EMPTY_HASH;
-
- //System.out.println(getName());
- int n = 0;
- for (RowData rd : mapRows.values()) {
- ch.addData(rd.getHashRow());
- //System.out.println("row "+n+" "+rd.getHashRow());
- n++;
- }
-
- return ch.calcHashStr();
- }
-
- @Override
- public int addToGit() throws ExceptionDBGit {
- int count = super.addToGit();
-
- if (mapRows == null) return count;
-
- for (RowData rd : mapRows.values()) {
- for (ICellData cd : rd.getData().values()) {
- count += cd.addToGit();
- }
- }
-
- return count;
- }
-
- @Override
- public int removeFromGit() throws ExceptionDBGit {
- int count = super.removeFromGit();
-
- if (mapRows == null)
- return 1;
-
- for (RowData rd : mapRows.values()) {
- for (ICellData cd : rd.getData().values()) {
- count += cd.removeFromGit();
- }
- }
-
- return count;
- }
-
-}
+package ru.fusionsoft.dbgit.meta;
+
+import java.io.*;
+import java.nio.charset.StandardCharsets;
+import java.sql.ResultSet;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+
+import de.siegmar.fastcsv.reader.CsvParser;
+import de.siegmar.fastcsv.reader.CsvReader;
+import de.siegmar.fastcsv.reader.CsvRow;
+
+import org.apache.commons.csv.CSVFormat;
+import org.apache.commons.csv.CSVParser;
+import org.apache.commons.csv.CSVPrinter;
+import org.apache.commons.csv.CSVRecord;
+
+import com.diogonunes.jcdp.color.api.Ansi.FColor;
+
+import org.apache.commons.lang3.exception.ExceptionUtils;
+import org.slf4j.Logger;
+import ru.fusionsoft.dbgit.adapters.AdapterFactory;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.DBGitConfig;
+import ru.fusionsoft.dbgit.core.DBGitLang;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRunTime;
+import ru.fusionsoft.dbgit.core.GitMetaDataManager;
+import ru.fusionsoft.dbgit.data_table.ICellData;
+import ru.fusionsoft.dbgit.data_table.MapFileData;
+import ru.fusionsoft.dbgit.data_table.RowData;
+import ru.fusionsoft.dbgit.data_table.TreeMapRowData;
+import ru.fusionsoft.dbgit.dbobjects.DBTable;
+import ru.fusionsoft.dbgit.dbobjects.DBTableData;
+import ru.fusionsoft.dbgit.utils.CalcHash;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+import ru.fusionsoft.dbgit.utils.LoggerUtil;
+
+/**
+ * Meta class for Table data
+ * @author mikle
+ *
+ */
+public class MetaTableData extends MetaBase {
+ private Logger logger = LoggerUtil.getLogger(this.getClass());
+ protected DBTable table = null;
+ private DBTableData dataTable = null;
+
+ private TreeMapRowData mapRows = null;
+ private List fields = new ArrayList<>();
+
+    /** No-arg constructor used by the meta-object factory; setDbType()/setDbVersion() are inherited (defined outside this view). */
+    public MetaTableData() {
+        setDbType();
+        setDbVersion();
+    }
+
+    /**
+     * Builds a table-data meta object for the given table and derives the meta name from it.
+     * @param tbl table whose data this object represents
+     */
+    public MetaTableData(DBTable tbl) throws ExceptionDBGit {
+        setDbType();
+        setDbVersion();
+        setTable(tbl);
+    }
+
+
+    /** @return the underlying table descriptor */
+    public DBTable getTable() {
+        return table;
+    }
+
+    // NOTE(review): raw TreeMap return type — generic parameters appear to have
+    // been stripped from this patch (presumably TreeMap<String, RowData>); verify
+    // against the committed file.
+    public TreeMap getmapRows() {
+        return mapRows;
+    }
+
+    /** @return the last result-set wrapper obtained from the adapter (null until a load has run) */
+    public DBTableData getDataTable() {
+        return dataTable;
+    }
+
+    public void setMapRows(TreeMapRowData mapRows) {
+        this.mapRows = mapRows;
+    }
+
+    public void setDataTable(DBTableData dataTable) {
+        this.dataTable = dataTable;
+    }
+
+    /** Attaches the table and derives this object's meta name as "schema/table.&lt;type&gt;". */
+    public void setTable(DBTable table) throws ExceptionDBGit {
+        this.table = table;
+        setName(table.getSchema()+"/"+table.getName()+"."+getType().getValue());
+    }
+
+    /** Replaces the cached CSV header column list. */
+    public void setFields(List fields) {
+        this.fields = fields;
+    }
+
+    /**
+     * Sets the meta name; when no table is attached yet, parses the name
+     * ("schema/table.type") and attaches a name-only DBTable stub so that
+     * schema/table accessors work before a real load.
+     */
+    @Override
+    public void setName(String name) throws ExceptionDBGit {
+        if (table == null) {
+            NameMeta nm = MetaObjectFactory.parseMetaName(name);
+            table = new DBTable.OnlyNameDBTable(nm.getName(), nm.getSchema());
+        }
+
+        super.setName(name);
+    }
+
+    /** @return the table-data meta type */
+    @Override
+    public DBGitMetaType getType() {
+        return DBGitMetaType.DbGitTableData;
+    }
+
+ public CSVFormat getCSVFormat() {
+ return CSVFormat.DEFAULT
+ //.withRecordSeparator("\n")
+ .withDelimiter(';')
+ .withNullString("")
+ .withQuote('"')
+ //.withQuoteMode(QuoteMode.ALL)
+ ;
+ }
+
+    /**
+     * Resolves the MetaTable (structure) matching this data object:
+     * first from the GitMetaDataManager DB cache, otherwise loaded
+     * directly from the database.
+     */
+    public MetaTable getMetaTable() throws ExceptionDBGit {
+        String metaTblName = table.getSchema()+"/"+table.getName()+"."+DBGitMetaType.DBGitTable.getValue();
+        GitMetaDataManager gmdm = GitMetaDataManager.getInstance();
+
+        IMapMetaObject dbObjs = gmdm.getCacheDBMetaData();
+        MetaTable metaTable = (MetaTable) dbObjs.get(metaTblName);
+        if (metaTable == null ) {
+            metaTable = new MetaTable();
+            metaTable.loadFromDB(table);
+        }
+        return metaTable;
+    }
+
+    /**
+     * Resolves the MetaTable from the meta file on disk; falls back to
+     * {@link #getMetaTable()} (cache/DB) when no file exists.
+     */
+    public MetaTable getMetaTableFromFile() throws ExceptionDBGit {
+        String metaTblName = table.getSchema()+"/"+table.getName()+"."+DBGitMetaType.DBGitTable.getValue();
+        GitMetaDataManager gmdm = GitMetaDataManager.getInstance();
+
+        MetaTable metaTable = (MetaTable)gmdm.loadMetaFile(metaTblName);
+        if (metaTable != null)
+            return metaTable;
+
+        //TODO ... which is not from file, but from db
+        return getMetaTable();
+    }
+
+
+ @Override
+ public boolean serialize(OutputStream stream) throws Exception {
+ Integer count = 0;
+ Set fields = null;
+
+ if (mapRows == null) {
+ return false;
+ }
+
+ CSVPrinter csvPrinter = new CSVPrinter(new OutputStreamWriter(stream), getCSVFormat());
+
+ for (RowData rd : mapRows.values()) {
+ if (count == 0) {
+ fields = rd.getData(this.fields).keySet();
+ csvPrinter.printRecord(fields);
+ }
+
+ rd.saveDataToCsv(csvPrinter, getTable());
+
+ count++;
+ }
+ csvPrinter.close();
+ return true;
+ }
+
+    /**
+     * Reads table rows from a CSV file into {@code mapRows} using fastcsv.
+     * The first record is the header (column names, stored into {@code fields});
+     * each following record becomes a RowData entry.
+     *
+     * @return this, with {@code mapRows} and {@code fields} populated
+     */
+    @Override
+    public IMetaObject deSerialize(File file) throws Exception {
+        MetaTable metaTable = getMetaTableFromFile();
+
+        CsvReader csvReader = new CsvReader();
+        csvReader.setFieldSeparator(';');
+        csvReader.setContainsHeader(false);
+        // NOTE(review): i starts at 1 and is incremented once per data row, so the
+        // "loadedRow" message reports data-row count + 1 — confirm that is intended.
+        int i = 1;
+
+        try (CsvParser csvParser = csvReader.parse(file, StandardCharsets.UTF_8)) {
+            CsvRow row;
+            boolean flag = false;  // false until the header record has been consumed
+            mapRows = new TreeMapRowData();
+            CsvRow titleColumns = null;
+
+
+            while ((row = csvParser.nextRow()) != null) {
+                if (!flag) {
+                    // first record: remember header for RowData construction
+                    titleColumns = row;
+                    fields = row.getFields();
+                } else {
+                    RowData rd = new RowData(row, metaTable, titleColumns);
+                    mapRows.put(rd);
+                    i++;
+                }
+                flag = true;
+            }
+        } catch (Throwable ex){
+            // report which row failed, then rethrow for the caller to handle
+            ConsoleWriter.detailsPrint(DBGitLang.getInstance().getValue("general", "meta", "loadRow").withParams(String.valueOf(i) ));
+            warnFilesNotFound();
+            throw ex;
+        }
+        ConsoleWriter.detailsPrint(DBGitLang.getInstance().getValue("general", "meta", "loadedRow").withParams(String.valueOf(i) ));
+        warnFilesNotFound();
+
+        return this;
+    }
+
+
+    /**
+     * Legacy CSV deserialization from a stream via commons-csv.
+     *
+     * @deprecated superseded by {@link #deSerialize(File)} (fastcsv, streaming);
+     *             this variant materializes all records in memory first.
+     */
+    @Override
+    @Deprecated
+    public IMetaObject deSerialize(InputStream stream) throws Exception {
+
+        MetaTable metaTable = getMetaTableFromFile();
+
+        // try-with-resources: the parser was previously leaked when getRecords()
+        // or RowData construction threw. UTF-8 is explicit to match the charset
+        // used by deSerialize(File).
+        try (CSVParser csvParser = new CSVParser(new InputStreamReader(stream, StandardCharsets.UTF_8), getCSVFormat())) {
+            List csvRecords = csvParser.getRecords();
+
+            if (csvRecords.size() > 0) {
+                CSVRecord titleColumns = csvRecords.get(0);
+                fields.clear();
+                for (int i = 0; i < titleColumns.size(); i++) {
+                    fields.add(titleColumns.get(i));
+                }
+
+                mapRows = new TreeMapRowData();
+
+                // records after the header become data rows
+                for (int i = 1; i < csvRecords.size(); i++) {
+                    RowData rd = new RowData(csvRecords.get(i), metaTable, titleColumns);
+                    mapRows.put(rd);
+                }
+            }
+        }
+
+        return this;
+    }
+
+    /** Loads the given data portion, with the retry counter starting at 0. */
+    public boolean loadPortionFromDB(int currentPortionIndex) throws ExceptionDBGit {
+        return loadPortionFromDB(currentPortionIndex, 0);
+    }
+
+    /**
+     * Loads one portion (page) of table data into {@code mapRows}, retrying on
+     * failure up to the configured TRY_COUNT with TRY_DELAY seconds between tries.
+     *
+     * @param currentPortionIndex zero-based portion index passed to the adapter
+     * @param tryNumber           current retry attempt (0 on first call)
+     * @return true when a portion was loaded; false when the table has no fields
+     *         or the adapter reported too many records
+     */
+    public boolean loadPortionFromDB(int currentPortionIndex, int tryNumber) throws ExceptionDBGit {
+        try {
+            IDBAdapter adapter = AdapterFactory.createAdapter();
+            MetaTable metaTable = getMetaTable();
+            if (metaTable.getFields().size() == 0)
+                return false;
+
+            dataTable = adapter.getTableDataPortion(table.getSchema(), table.getName(), currentPortionIndex, 0);
+
+            ResultSet rs = dataTable.resultSet();
+
+            if (dataTable.errorFlag() > 0) {
+                final String tooManyRecordsMsg = DBGitLang.getInstance().getValue("errors", "meta", "tooManyRecords").withParams(getName(), String.valueOf(IDBAdapter.MAX_ROW_COUNT_FETCH));
+                ConsoleWriter.printlnColor(tooManyRecordsMsg, FColor.RED, 0);
+                return false;
+            }
+
+            mapRows = new TreeMapRowData();
+
+            // capture column names from result-set metadata once, on the first row
+            boolean headerCaptured = false;
+            while (rs.next()) {
+                if (!headerCaptured) {
+                    fields.clear();
+                    for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) {
+                        String columnName = rs.getMetaData().getColumnName(i + 1);
+                        // service column added by the paging query, not part of the table
+                        if (columnName.equalsIgnoreCase("DBGIT_ROW_NUM"))
+                            continue;
+                        fields.add(columnName);
+                    }
+                    headerCaptured = true;
+                }
+                RowData rd = new RowData(rs, metaTable);
+                mapRows.put(rd);
+            }
+
+            return true;
+        } catch (Exception e) {
+
+            ConsoleWriter.println(e.getLocalizedMessage(), messageLevel);
+            ConsoleWriter.detailsPrintln(ExceptionUtils.getStackTrace(e), messageLevel);
+            logger.error(DBGitLang.getInstance().getValue("errors", "adapter", "tableData").toString(), e);
+
+            try {
+                if (tryNumber <= DBGitConfig.getInstance().getInteger("core", "TRY_COUNT", DBGitConfig.getInstance().getIntegerGlobal("core", "TRY_COUNT", 1000))) {
+                    try {
+                        TimeUnit.SECONDS.sleep(DBGitConfig.getInstance().getInteger("core", "TRY_DELAY", DBGitConfig.getInstance().getIntegerGlobal("core", "TRY_DELAY", 1000)));
+                    } catch (InterruptedException e1) {
+                        // preserve interrupt status for callers further up the stack
+                        Thread.currentThread().interrupt();
+                        throw new ExceptionDBGitRunTime(e1.getMessage());
+                    }
+                    ConsoleWriter.println(DBGitLang.getInstance()
+                            .getValue("errors", "dataTable", "tryAgain")
+                            .withParams(String.valueOf(tryNumber))
+                            , messageLevel
+                    );
+                    // BUG FIX: was "tryNumber++" (post-increment), which passed the
+                    // UNCHANGED value into the recursive call, so the retry counter
+                    // never advanced and the TRY_COUNT bound was never reached.
+                    return loadPortionFromDB(currentPortionIndex, tryNumber + 1);
+                }
+            } catch (Exception e1) {
+                throw new ExceptionDBGitRunTime(e1);
+            }
+
+            if (e instanceof ExceptionDBGit) throw (ExceptionDBGit) e;
+            throw new ExceptionDBGit(e);
+        }
+    }
+
+    /**
+     * Loads the whole table contents into {@code mapRows} in one pass.
+     *
+     * @return true when data was loaded; false when the table has no fields
+     *         or the adapter reported too many records
+     */
+    @Override
+    public boolean loadFromDB() throws ExceptionDBGit {
+        try {
+            IDBAdapter adapter = AdapterFactory.createAdapter();
+
+            MetaTable metaTable = getMetaTable();
+
+            if (metaTable.getFields().size() == 0)
+                return false;
+
+            dataTable = adapter.getTableData(table.getSchema(), table.getName());
+
+            if (dataTable.errorFlag() > 0) {
+                ConsoleWriter.printlnColor(DBGitLang.getInstance().getValue("errors", "meta", "tooManyRecords").
+                        withParams(getName(), String.valueOf(IDBAdapter.MAX_ROW_COUNT_FETCH)), FColor.RED, 0);
+                return false;
+            }
+
+            ResultSet rs = dataTable.resultSet();
+
+            mapRows = new TreeMapRowData();
+
+            while (rs.next()) {
+                mapRows.put(new RowData(rs, metaTable));
+            }
+            return true;
+        } catch (Exception e) {
+            // rethrow domain exceptions unchanged (consistent with
+            // loadPortionFromDB); wrap everything else with context
+            if (e instanceof ExceptionDBGit) throw (ExceptionDBGit) e;
+            throw new ExceptionDBGit("Error loading table data from DB", e);
+        }
+
+    }
+
+    /**
+     * Prints a difference report between this table's rows and {@code ob}'s to
+     * stdout: overall row-count mismatch, per-row column-count mismatch, and
+     * per-cell value differences (rows matched by their hash key).
+     */
+    public void diff(MetaTableData ob) throws Exception {
+        if (mapRows.size() != ob.mapRows.size()) {
+            System.out.println(DBGitLang.getInstance().getValue("general", "meta", "diffSize1").withParams(String.valueOf(mapRows.size()), String.valueOf(ob.mapRows.size())));
+        }
+        for (String rowHash : mapRows.keySet()) {
+            RowData r1 = mapRows.get(rowHash);
+            RowData r2 = ob.mapRows.get(rowHash);
+
+            // BUG FIX: r2 was dereferenced unconditionally and threw NPE whenever
+            // a row exists only on this side; report it as a row difference instead.
+            if (r2 == null) {
+                System.out.println(DBGitLang.getInstance().getValue("general", "meta", "diffSize2").withParams(rowHash));
+                continue;
+            }
+
+            if (r1.getData(fields).size() != r2.getData(ob.fields).size()) {
+                System.out.println(DBGitLang.getInstance().getValue("general", "meta", "diffSize2").withParams(rowHash));
+            }
+
+            for (String col : r1.getData(fields).keySet()) {
+                ICellData c2 = r2.getData(ob.fields).get(col);
+                String d1 = r1.getData(fields).get(col).convertToString();
+                String d2 = (c2 == null) ? null : c2.convertToString();
+
+                // BUG FIX: was "d1 != d2" (String identity) guarding
+                // "d1.equals(<cell object>)" — a String compared against a cell,
+                // which is always false. Compare converted values, null-safely.
+                if (d1 == null ? d2 != null : !d1.equals(d2)) {
+                    System.out.println(DBGitLang.getInstance().getValue("general", "meta", "diffDataRow").
+                            withParams(rowHash, col, String.valueOf(d1), String.valueOf(d2)));
+                }
+            }
+        }
+    }
+
+
+    /**
+     * Hash over all row hashes, folded in map iteration order.
+     *
+     * @return EMPTY_HASH when no rows are loaded or the table is empty
+     */
+    @Override
+    public String getHash() {
+        // merged the two early-exit checks; dropped the dead debug counter "n"
+        if (mapRows == null || mapRows.size() == 0)
+            return EMPTY_HASH;
+
+        CalcHash ch = new CalcHash();
+        for (RowData rd : mapRows.values()) {
+            ch.addData(rd.getHashRow());
+        }
+
+        return ch.calcHashStr();
+    }
+
+    /**
+     * Adds this meta object's file to git (via super), plus whatever each cell
+     * contributes through ICellData.addToGit() (defined outside this view).
+     *
+     * @return total number of items added
+     */
+    @Override
+    public int addToGit() throws ExceptionDBGit {
+        int count = super.addToGit();
+
+        if (mapRows == null) return count;
+
+        for (RowData rd : mapRows.values()) {
+            for (ICellData cd : rd.getData(fields).values()) {
+                count += cd.addToGit();
+            }
+        }
+
+        return count;
+    }
+
+    /**
+     * Removes this meta object's file from git (via super), plus each cell's
+     * contribution through ICellData.removeFromGit().
+     *
+     * @return total number of items removed
+     */
+    @Override
+    public int removeFromGit() throws ExceptionDBGit {
+        int count = super.removeFromGit();
+
+        // NOTE(review): returns the constant 1 when no rows are loaded, whereas
+        // addToGit() returns super's count in the same situation — confirm this
+        // asymmetry is intentional.
+        if (mapRows == null)
+            return 1;
+
+        for (RowData rd : mapRows.values()) {
+            for (ICellData cd : rd.getData(fields).values()) {
+                count += cd.removeFromGit();
+            }
+        }
+
+        return count;
+    }
+
+    /** @return CSV header column names captured by the last load/deserialize */
+    public List getFields() {
+        return fields;
+    }
+
+    /**
+     * Prints a yellow warning listing external files that cells referenced but
+     * that were missing on disk, then clears the shared not-found list.
+     */
+    private void warnFilesNotFound(){
+        Set filesNotFound = MapFileData.getFilesNotFound();
+        if (filesNotFound == null || filesNotFound.size() == 0) {
+            return;  // nothing to report
+        }
+        ConsoleWriter.detailsPrintlnColor(
+                DBGitLang.getInstance().getValue("errors", "dataTable", "filesNotFound")
+                        .withParams(String.join(";", filesNotFound)),
+                FColor.YELLOW, messageLevel
+        );
+        filesNotFound.clear();
+    }
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/MetaUDT.java b/src/main/java/ru/fusionsoft/dbgit/meta/MetaUDT.java
new file mode 100644
index 0000000..014fd0d
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/MetaUDT.java
@@ -0,0 +1,40 @@
+package ru.fusionsoft.dbgit.meta;
+
+import ru.fusionsoft.dbgit.adapters.AdapterFactory;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
+import ru.fusionsoft.dbgit.dbobjects.DBSQLObject;
+import ru.fusionsoft.dbgit.dbobjects.DBUserDefinedType;
+
+public class MetaUDT extends MetaSql {
+    public MetaUDT() {
+    }
+
+    public MetaUDT(DBSQLObject sqlObject) throws ExceptionDBGit {
+        super(sqlObject);
+    }
+
+    /**
+     * @return Type meta object
+     */
+    @Override
+    public final IDBGitMetaType getType() {
+        return DBGitMetaType.DBGitUserDefinedType;
+    }
+
+    /**
+     * load current object from DB
+     */
+    @Override
+    public final boolean loadFromDB() throws ExceptionDBGit {
+        final NameMeta nm = MetaObjectFactory.parseMetaName(name);
+        final DBUserDefinedType dbObject = AdapterFactory.createAdapter().getUDT(nm.getSchema(), nm.getName());
+
+        // guard-clause form: absent UDT means nothing to load
+        if (dbObject == null) {
+            return false;
+        }
+        setSqlObject(dbObject);
+        return true;
+    }
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/SortedListMetaObject.java b/src/main/java/ru/fusionsoft/dbgit/meta/SortedListMetaObject.java
index 35b6f91..16fd538 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/SortedListMetaObject.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/SortedListMetaObject.java
@@ -1,12 +1,14 @@
package ru.fusionsoft.dbgit.meta;
+import com.diogonunes.jcdp.color.api.Ansi;
import com.google.common.collect.Sets;
import ru.fusionsoft.dbgit.core.DBGitLang;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
import ru.fusionsoft.dbgit.dbobjects.DBSQLObject;
import ru.fusionsoft.dbgit.dbobjects.DBTable;
import ru.fusionsoft.dbgit.utils.ConsoleWriter;
-import java.sql.Timestamp;
+
import java.util.*;
import java.util.stream.Collectors;
@@ -21,28 +23,47 @@ public class SortedListMetaObject {
private List listFromFree;
private Collection collection;
- SortedListMetaObject(Collection fromCollection){
+ public SortedListMetaObject(Collection fromCollection){
collection = new ArrayList<>(fromCollection);
calculateImoCrossDependencies();
}
+ public Collection getCollection(){
+ return collection;
+}
+ public List sortFromDependencies() throws ExceptionDBGit {
+ if (listFromDependant == null) {
+ listFromDependant = createSortedList(false);
+ }
+ return listFromDependant;
+
+ }
+ public List sortFromReferenced() throws ExceptionDBGit {
+ if (listFromFree == null) {
+ listFromFree = createSortedList(true);
+ }
+ return listFromFree;
+ }
+
private void calculateImoCrossDependencies(){
- Timestamp timestampBefore = new Timestamp(System.currentTimeMillis());
for(DBGitMetaType metaType : Sets.newHashSet(DBGitMetaType.DBGitTable, DBGitMetaType.DbGitFunction)){
- List objectsOfType = collection.stream().filter(x->x.getType().equals(metaType) ).collect(Collectors.toList());
+ List objectsOfType = collection.stream()
+ .filter( x->x.getType().equals(metaType) )
+ .collect(Collectors.toList());
+
+
Map realNamesToMetaNames = objectsOfType.stream().collect(Collectors.toMap(
- x->x.getUnderlyingDbObject().getSchema() + "." + x.getUnderlyingDbObject().getName(),
- IMetaObject::getName
- )
- );
+ x-> x.getUnderlyingDbObject().getSchema() + "." + x.getUnderlyingDbObject().getName(),
+ IMetaObject::getName
+ ));
for(IMetaObject imo : objectsOfType){
if(imo.getType().equals(DBGitMetaType.DbGitFunction)){
DBSQLObject dbsql = (DBSQLObject) imo.getUnderlyingDbObject();
Set deps = realNamesToMetaNames.keySet().stream()
- .filter( x -> dbsql.getSql().contains(x) && !(dbsql.getSchema()+"."+dbsql.getName()).equals(x) )
+ .filter( x -> dbsql.getSql().contains(x) /*&& !(dbsql.getSchema()+"."+dbsql.getName()).equals(x)*/ )
.map(realNamesToMetaNames::get)
.collect(Collectors.toSet());
dbsql.setDependencies(deps);
@@ -50,88 +71,77 @@ private void calculateImoCrossDependencies(){
if(imo.getType().equals(DBGitMetaType.DBGitTable)){
DBTable dbTable = (DBTable) imo.getUnderlyingDbObject();
Set deps = realNamesToMetaNames.values().stream()
- .filter( x -> dbTable.getDependencies().contains(x) && !x.equals(imo.getName()) )
- .collect(Collectors.toSet());
- dbTable.setDependencies(deps);
+ .filter( x -> dbTable.getDependencies().contains(x) /*&& !x.equals(imo.getName())*/ )
+ .collect(Collectors.toSet());
+ dbTable.getDependencies().addAll(deps);
}
}
}
- Timestamp timestampAfter = new Timestamp(System.currentTimeMillis());
- Long diff = timestampAfter.getTime() - timestampBefore.getTime();
- ConsoleWriter.detailsPrintlnGreen(DBGitLang.getInstance().getValue("general", "time").withParams(diff.toString()));
- };
+ }
- public List sortFromDependant(){
- if (listFromDependant == null) {
- listFromDependant = new ArrayList<>();
- Arrays.stream(DBGitMetaType.values())
- .sorted(Comparator.comparing(DBGitMetaType::getPriority).reversed())
- .forEach(tp -> {
-
- List objectsOfType = collection.stream().filter(x -> x.getType().equals(tp)).collect(Collectors.toList());
- if (!objectsOfType.isEmpty()) {
-
- if (tp.equals(DBGitMetaType.DBGitTable ) || (objectsOfType.get(0) instanceof MetaSql)) {
- List objectsL0 = objectsOfType.stream().filter(x -> x.getUnderlyingDbObject().getDependencies().size() == 0).collect(Collectors.toList());
-
- objectsOfType.removeAll(objectsL0);
- while (!objectsOfType.isEmpty()) {
- Set namesL0 = objectsL0.stream().map(IMetaObject::getName).collect(Collectors.toSet());
- List objectsL1 = objectsOfType
- .stream()
- .filter(x -> namesL0.containsAll(x.getUnderlyingDbObject().getDependencies()))
- .sorted(imoDependenceComparator.reversed())
- .collect(Collectors.toList());
- objectsOfType.removeAll(objectsL1);
- objectsL0.addAll(0, objectsL1);
- }
- listFromDependant.addAll(objectsL0);
- } else {
- listFromDependant.addAll(objectsOfType);
- }
+ public List createSortedList(boolean isSortedFromFree) throws ExceptionDBGit {
+ List list = new ArrayList<>();
+ Comparator typeComparator = isSortedFromFree
+ ? Comparator.comparing(DBGitMetaType::getPriority)
+ : Comparator.comparing(DBGitMetaType::getPriority).reversed();
+ Comparator imoComparator = isSortedFromFree
+ ? imoDependenceComparator
+ : imoDependenceComparator.reversed();
+
+ List types = Arrays
+ .stream(DBGitMetaType.values())
+ .sorted(typeComparator)
+ .collect(Collectors.toList());
+
+ for (DBGitMetaType tp : types) {
+ List objectsOfType = collection.stream().filter(x -> x.getType().equals(tp)).collect(Collectors.toList());
+ if (!objectsOfType.isEmpty()) {
+ if (tp.equals(DBGitMetaType.DBGitTable) || objectsOfType.get(0) instanceof MetaSql) {
+ Set namesAllOfType = objectsOfType.stream().map(IMetaObject::getName).collect(Collectors.toSet());
+ List objectsL0 = objectsOfType.stream()
+ .filter(x -> {
+ Set deps = x.getUnderlyingDbObject().getDependencies();
+ return deps.size() == 0 || ( deps.size() == 1 && deps.contains(x.getName()) );
+ })
+ .collect(Collectors.toList());
+
+ objectsOfType.removeAll(objectsL0);
+ while (!objectsOfType.isEmpty()) {
+ Set namesL0 = objectsL0.stream().map(IMetaObject::getName).collect(Collectors.toSet());
+ List objectsL1 = objectsOfType
+ .stream()
+ .filter(x -> {
+ Set actualDeps = new HashSet<>(x.getUnderlyingDbObject().getDependencies());
+ actualDeps.retainAll(namesAllOfType); //only deps of same type
+ actualDeps.remove(x.getName());
+ return namesL0.containsAll(actualDeps);
+ })
+ .sorted(imoComparator)
+ .collect(Collectors.toList());
+ if (objectsL1.isEmpty()) {
+ warnNotAdded(objectsOfType);
+ throw new ExceptionDBGit("infinite loop");
}
- });
+ objectsOfType.removeAll(objectsL1);
+ if(isSortedFromFree) { objectsL0.addAll(objectsL1); }
+ else { objectsL0.addAll(0, objectsL1); }
+ }
+ list.addAll(objectsL0);
+ } else {
+ list.addAll(objectsOfType);
+ }
+ }
}
- return listFromDependant;
-
- };
- public List sortFromFree(){
- if (listFromFree == null) {
- listFromFree = new ArrayList<>();
- Arrays.stream(DBGitMetaType.values())
- .sorted(Comparator.comparing(DBGitMetaType::getPriority))
- .forEach(tp -> {
-
- List objectsOfType = collection.stream().filter(x -> x.getType().equals(tp)).collect(Collectors.toList());
- if (!objectsOfType.isEmpty()) {
-
- if (tp.equals(DBGitMetaType.DBGitTable) || objectsOfType.get(0) instanceof MetaSql) {
- List objectsL0 = objectsOfType.stream().filter(x -> x.getUnderlyingDbObject().getDependencies().size() == 0).collect(Collectors.toList());
-
- objectsOfType.removeAll(objectsL0);
- while (!objectsOfType.isEmpty()) {
- Set namesL0 = objectsL0.stream().map(IMetaObject::getName).collect(Collectors.toSet());
- List objectsL1 = objectsOfType
- .stream()
- .filter(x -> namesL0.containsAll(x.getUnderlyingDbObject().getDependencies()))
- .sorted(imoDependenceComparator)
- .collect(Collectors.toList());
- objectsOfType.removeAll(objectsL1);
- objectsL0.addAll(objectsL1);
- }
- listFromFree.addAll(objectsL0);
- } else {
- listFromFree.addAll(objectsOfType);
- }
- }
+// int i = 0;
+// for(IMetaObject imo : list){
+// ConsoleWriter.printlnRed(MessageFormat.format("{0}. {1}", i++, imo.getName()));
+// }
+ return list;
+ }
- });
- }
- return listFromFree;
- };
public static Comparator imoTypeComparator = Comparator.comparing(x->x.getType().getPriority());
public static Comparator imoDependenceComparator = (o1, o2) -> {
@@ -152,4 +162,13 @@ public List sortFromFree(){
return result;
};
+ public void warnNotAdded(List remained){
+ ConsoleWriter.printlnRed(DBGitLang.getInstance()
+ .getValue("errors", "unsatisfiedDependencies")
+ , 1
+ );
+
+ remained.forEach( x -> ConsoleWriter.printlnColor(x.getName(), Ansi.FColor.MAGENTA, 1) );
+ }
+
}
diff --git a/src/main/java/ru/fusionsoft/dbgit/meta/TreeMapMetaObject.java b/src/main/java/ru/fusionsoft/dbgit/meta/TreeMapMetaObject.java
index ac1db18..d167b3d 100644
--- a/src/main/java/ru/fusionsoft/dbgit/meta/TreeMapMetaObject.java
+++ b/src/main/java/ru/fusionsoft/dbgit/meta/TreeMapMetaObject.java
@@ -36,11 +36,11 @@ public int compare(String nm1, String nm2) {
}
- public TreeMapMetaObject(List from){
+ public TreeMapMetaObject(Collection from){
this();
this.putAll(from.stream().collect(Collectors.toMap(IMetaObject::getName, key->key)));
}
-
+
@Override
public IMapMetaObject put(IMetaObject obj) {
put(obj.getName(), obj);
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBAdapterMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBAdapterMssql.java
new file mode 100644
index 0000000..f33d99d
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBAdapterMssql.java
@@ -0,0 +1,1398 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.lang3.exception.ExceptionUtils;
+import org.slf4j.Logger;
+import ru.fusionsoft.dbgit.adapters.DBAdapter;
+import ru.fusionsoft.dbgit.adapters.IFactoryDBAdapterRestoteMetaData;
+import ru.fusionsoft.dbgit.adapters.IFactoryDBBackupAdapter;
+import ru.fusionsoft.dbgit.adapters.IFactoryDBConvertAdapter;
+import ru.fusionsoft.dbgit.core.*;
+import ru.fusionsoft.dbgit.core.db.DbType;
+import ru.fusionsoft.dbgit.core.db.FieldType;
+import ru.fusionsoft.dbgit.data_table.*;
+import ru.fusionsoft.dbgit.dbobjects.*;
+import ru.fusionsoft.dbgit.meta.IMapMetaObject;
+import ru.fusionsoft.dbgit.meta.TreeMapMetaObject;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+import ru.fusionsoft.dbgit.utils.LoggerUtil;
+import ru.fusionsoft.dbgit.utils.StringProperties;
+
+import java.sql.*;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+
+public class DBAdapterMssql extends DBAdapter {
+
+ public static final String DEFAULT_MAPPING_TYPE = "varchar";
+ private static final HashSet systemSchemas = new HashSet<>(Arrays.asList(
+ "db_denydatawriter",
+ "db_datawriter",
+ "db_accessadmin",
+ "db_ddladmin",
+ "db_securityadmin",
+ "db_denydatareader",
+ "db_backupoperator",
+ "db_datareader",
+ "db_owner",
+ "sys",
+ "INFORMATION_SCHEMA"
+ ));
+
+ //Stubs for MSSQL adapter, marked as "TODO Auto-generated method stub"
+ //And some unfinished implementations marked as "TODO MSSQL *"
+
+ private Logger logger = LoggerUtil.getLogger(this.getClass());
+ private FactoryDBAdapterRestoreMssql restoreFactory = new FactoryDBAdapterRestoreMssql();
+ private FactoryDbConvertAdapterMssql convertFactory = new FactoryDbConvertAdapterMssql();
+ private FactoryDBBackupAdapterMssql backupFactory = new FactoryDBBackupAdapterMssql();
+
+ @Override
+ public IFactoryDBAdapterRestoteMetaData getFactoryRestore() {
+ return restoreFactory;
+ }
+
+ @Override
+ public void startUpdateDB() {
+ // TODO Auto-generated method stub
+ }
+
+ @Override
+ public void endUpdateDB() {
+ // TODO Auto-generated method stub
+ }
+
+ @Override
+ public IMapMetaObject loadCustomMetaObjects() {
+ return new TreeMapMetaObject(Collections.emptyList());
+ }
+
+ @Override
+ public Map getSchemes() {
+ final Map listScheme = new HashMap<>();
+ try (ResultSet rs = getConnection().getMetaData().getSchemas()){
+
+ // made without query
+ // Statement stmt = connect.createStatement();
+ // ResultSet rs = stmt.executeQuery(query);
+ while(rs.next()){
+ final String name = rs.getString("TABLE_SCHEM");
+
+ // May also get catalog names that belong to scheme as "TABLE_CATALOG"
+ if(!systemSchemas.contains(name)) {
+ final DBSchema scheme = new DBSchema(name, new StringProperties(rs));
+ listScheme.put(name, scheme);
+ }
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "schemes").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+
+ return listScheme;
+ }
+
+ @Override
+ public Map getTableSpaces() {
+ final Map listTableSpace = new HashMap<>();
+ final String query = "SELECT \n" +
+ "[SFG].name AS [File Group Name],\n" +
+ "[SFG].*,\n" +
+ "[SDB].name AS [Database Name],\n" +
+ "[F].name AS [File Name],\n" +
+ "[SDBF].name AS [Database File Name],\n" +
+ "[SDBF].physical_name\n" +
+ "INTO #fgroups\n" +
+ "FROM [master].sys.master_files AS [F]\n" +
+ "INNER JOIN sys.databases AS [SDB]\n" +
+ " ON [SDB].database_id = [F].database_id\n" +
+ "INNER JOIN sys.database_files AS [SDBF]\n" +
+ " ON [SDBF].[file_id] = [F].[file_id]\n" +
+ "INNER JOIN sys.filegroups AS [SFG]\n" +
+ " ON [sfg].data_space_id = [F].data_space_id\n" +
+ "SELECT \n" +
+ " [File Group Name],\n" +
+ " [data_space_id],\n" +
+ " [type],\n" +
+ " [type_desc],\n" +
+ " [is_default],\n" +
+ " [is_system],\n" +
+ " [is_read_only],\n" +
+ " [filegroup_guid],\n" +
+ " [log_filegroup_id],\n" +
+ " STUFF((\n" +
+ " SELECT DISTINCT ', ' + [Database Name] \n" +
+ " FROM #fgroups \n" +
+ " WHERE ([File Group Name] = Results.[File Group Name]) \n" +
+ " FOR XML PATH(''),TYPE).value('(./text())[1]','VARCHAR(MAX)')\n" +
+ " ,1,2,'') AS DatabaseNames, \n" +
+ " STUFF((\n" +
+ " SELECT DISTINCT ', ' + [Database File Name] \n" +
+ " FROM #fgroups \n" +
+ " WHERE ([File Group Name] = Results.[File Group Name]) \n" +
+ " FOR XML PATH(''),TYPE).value('(./text())[1]','VARCHAR(MAX)')\n" +
+ " ,1,2,'') AS DatabaseFileNames,\n" +
+ " STUFF((\n" +
+ " SELECT DISTINCT ', ' + [File Name] \n" +
+ " FROM #fgroups \n" +
+ " WHERE ([File Group Name] = Results.[File Group Name]) \n" +
+ " FOR XML PATH(''),TYPE).value('(./text())[1]','VARCHAR(MAX)')\n" +
+ " ,1,2,'') AS FileNames,\n" +
+ " STUFF((\n" +
+ " SELECT DISTINCT ', ' + [physical_name] \n" +
+ " FROM #fgroups \n" +
+ " WHERE ([File Group Name] = Results.[File Group Name]) \n" +
+ " FOR XML PATH(''),TYPE).value('(./text())[1]','VARCHAR(MAX)')\n" +
+ " ,1,2,'') AS PhysicalNames\n" +
+ "FROM #fgroups Results\n" +
+ "GROUP BY [File Group Name],[data_space_id],\n" +
+ " [type],\n" +
+ " [type_desc],\n" +
+ " [is_default],\n" +
+ " [is_system],\n" +
+ " [is_read_only],\n" +
+ " [filegroup_guid],\n" +
+ " [log_filegroup_id]\n" +
+ "DROP TABLE #fgroups\n";
+
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);){
+
+ while(rs.next()){
+ final String name = rs.getString("File Group Name");
+ final DBTableSpace dbTableSpace = new DBTableSpace(name, new StringProperties(rs));
+ listTableSpace.put(name, dbTableSpace);
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "tablespace").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+
+ return listTableSpace;
+ }
+
+ @Override
+ public Map getSequences(String schema) {
+ final Map listSequence = new HashMap();
+ final String query =
+ "SELECT seq.*,\n" +
+ "TYPE_NAME(seq.system_type_id) as typeName,\n" +
+ "SCHEMA_NAME(seq.schema_id) as owner \n" +
+ "FROM sys.objects, sys.SEQUENCES seq \n" +
+ "WHERE sys.objects.object_id = seq.object_id \n" +
+ "AND SCHEMA_NAME(seq.schema_id) = '"+schema+"'";
+
+ try(Statement stmtValue = getConnection().createStatement(); ResultSet rs = stmtValue.executeQuery(query)){
+
+ while(rs.next()){
+				final String ownerSeq = rs.getString("owner");
+ final String nameSeq = rs.getString("name");
+ final Long valueSeq = rs.getLong("current_value");
+ final DBSequence seq = new DBSequence(nameSeq, new StringProperties(rs), schema, ownerSeq, Collections.emptySet(), valueSeq);
+ listSequence.put(nameSeq, seq);
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "seq").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+
+ return listSequence;
+ }
+
+ @Override
+ public DBSequence getSequence(String schema, String name) {
+ final String query =
+ "SELECT seq.*,\n" +
+ "USER_NAME(objectproperty(seq.object_id,'OwnerId')) as owner,\n" +
+ "TYPE_NAME(seq.system_type_id) as typeName, " +
+ "SCHEMA_NAME(seq.schema_id) as schemaName " +
+ "FROM sys.objects, sys.SEQUENCES seq " +
+ "WHERE sys.objects.object_id = seq.object_id " +
+ "AND SCHEMA_NAME(seq.schema_id) = '"+schema+"' " +
+ "AND seq.name = '" + name + "'\n";
+
+ try(Statement stmtValue = getConnection().createStatement(); ResultSet rs = stmtValue.executeQuery(query)){
+
+ if(rs.next()){
+				final String ownerSeq = rs.getString("owner");
+ final String nameSeq = rs.getString("name");
+ final Long valueSeq = rs.getLong("current_value");
+ return new DBSequence(nameSeq, new StringProperties(rs), schema, ownerSeq, Collections.emptySet(), valueSeq);
+ } else {
+ final String msg = lang.getValue("errors", "adapter", "objectNotFoundInDb").toString();
+ throw new ExceptionDBGitObjectNotFound(msg);
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "seq").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ }
+
+ @Override
+ public Map getTables(String schema) {
+ final Map listTable = new HashMap<>();
+ final String query =
+ "SELECT TABLE_NAME as 'name', TABLE_CATALOG as 'database', TABLE_SCHEMA as 'schema'\n" +
+ "FROM INFORMATION_SCHEMA.TABLES \n" +
+ "WHERE INFORMATION_SCHEMA.TABLES.TABLE_SCHEMA = '" + schema + "'\n" +
+ "AND INFORMATION_SCHEMA.TABLES.TABLE_TYPE = 'BASE TABLE'";
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);) {
+ while(rs.next()){
+ //TODO retrieve table comment
+ //TODO retrieve table owner
+ final String nameTable = rs.getString("name");
+ final String ownerTable = "";
+ final String commentTable = "";
+ final StringProperties options = new StringProperties(rs);
+				// The query selects no "dependencies" column; rs.getArray("dependencies")
+				// would throw SQLException on every row. TODO MSSQL: load real dependencies.
+				final Set dependencies = Collections.emptySet();
+
+ final DBTable table = new DBTable(nameTable, options, schema, ownerTable, dependencies, commentTable);
+ listTable.put(nameTable, table);
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "tables").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ return listTable;
+ }
+
+ @Override
+ public DBTable getTable(String schema, String name) {
+ final String query =
+ "SELECT\n" +
+ " o.name tableName, t.TABLE_SCHEMA schemaName, t.TABLE_CATALOG catalogName,\n" +
+ " CASE WHEN o.principal_id is NOT NULL THEN (SELECT name FROM sys.database_principals dp WHERE dp.principal_id=o.principal_id)\n" +
+ " ELSE (SELECT dp.name FROM sys.database_principals dp,sys.schemas s WHERE s.schema_id=o.schema_id and s.principal_id=dp.principal_id)\n" +
+ " END as owner\n" +
+ "FROM sys.objects o, INFORMATION_SCHEMA.TABLES t\n" +
+ "WHERE o.type='U' AND o.name = t.TABLE_NAME AND t.TABLE_NAME = '"+name+"' AND t.TABLE_SCHEMA = '"+schema+"'";
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);) {
+ if (rs.next()){
+ //TODO retrieve table comment
+ //TODO retrieve table owner
+				final String nameTable = rs.getString("tableName");
+ final String ownerTable = "";
+ final String commentTable = "";
+ final StringProperties options = new StringProperties(rs);
+				// The query selects no "dependencies" column; rs.getArray("dependencies")
+				// would throw SQLException. TODO MSSQL: load real table dependencies.
+				final Set dependencies = Collections.emptySet();
+
+ return new DBTable(nameTable, options, schema, ownerTable, dependencies, commentTable);
+ } else {
+ final String msg = lang.getValue("errors", "adapter", "objectNotFoundInDb").toString();
+ throw new ExceptionDBGitObjectNotFound(msg);
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "tables").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ }
+
+ @Override
+ public Map getTableFields(String schema, String nameTable) {
+ final Map listField = new HashMap<>();
+ final String query =
+ "SELECT DISTINCT\n" +
+ " c.TABLE_SCHEMA as schemaName,\n" +
+ " c.TABLE_NAME as tableName,\n" +
+ " c.COLUMN_NAME as columnName,\n" +
+ " c.ORDINAL_POSITION as columnOrder,\n" +
+ " c.DATA_TYPE as mssqlType,\n" +
+ " CASE WHEN lower(c.DATA_TYPE) in ('bigint', 'int', 'float', 'decimal', 'money', 'numeric', 'real', 'smallint', 'smallmoney', 'tinyint') then 'number' \n" +
+ " when lower(c.DATA_TYPE) in ('char','varchar','xml','nchar','nvarchar', 'uniqueidentifier') then 'string'\n" +
+ " when lower(c.DATA_TYPE) in ('bit') then 'boolean'\n" +
+ " when lower(c.DATA_TYPE) in ('datetime', 'smalldatetime', 'time') then 'date'\n" +
+ " when lower(c.DATA_TYPE) in ('text','ntext') then 'text'\n" +
+ " when lower(c.DATA_TYPE) in ('timestamp', 'binary', 'varbinary', 'geometry', 'geography') then 'binary'\n" +
+ " else 'native'\n" +
+ " end dbgitType,\n" +
+ " CASE WHEN 1 IN ( \n" +
+ " SELECT OBJECTPROPERTY(OBJECT_ID(CONSTRAINT_SCHEMA + '.' + QUOTENAME(CONSTRAINT_NAME)),'IsPrimaryKey')\n" +
+ " FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE\n" +
+ " WHERE c.COLUMN_NAME = COLUMN_NAME AND c.TABLE_NAME = TABLE_NAME\n" +
+ " )\n" +
+ " THEN 1 ELSE 0 END isPk,\n" +
+ " c.IS_NULLABLE as isNullable,\n" +
+ " c.NUMERIC_SCALE as scale,\n" +
+ " c.CHARACTER_MAXIMUM_LENGTH as length,\n" +
+ " CASE WHEN lower(c.DATA_TYPE) in ('char', 'nchar') then '1' else '0' end isFixed," +
+ " c.NUMERIC_PRECISION as precision\n" +
+ "FROM INFORMATION_SCHEMA.COLUMNS as c\n" +
+ "WHERE TABLE_SCHEMA = '" + schema + "' AND TABLE_NAME = '" + nameTable + "'";
+
+ try(Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);) {
+
+ while(rs.next()){
+ final DBTableField field = DBTableFieldFromRs(rs);
+ listField.put(field.getName(), field);
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "tableData").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+
+ return listField;
+ }
+
+ private DBTableField DBTableFieldFromRs(ResultSet rs) throws SQLException {
+ final boolean isPrimaryKey = rs.getString("isPk").equals("1");
+ final boolean isFixed = rs.getBoolean("isFixed");
+		final boolean isNullable = "YES".equalsIgnoreCase(rs.getString("isNullable"));
+ final String columnName = rs.getString("columnName").toLowerCase();
+ //TODO make find out column comment
+ final String columnDesc = "";
+ //TODO make find out column default value
+ final String columnDefault = "";
+ final String typeSQL = getFieldType(rs);
+ final FieldType typeUniversal = FieldType.fromString(rs.getString("dbgitType").toUpperCase());
+ final int length = rs.getInt("length");
+ final int scale = rs.getInt("scale");
+ final int precision = rs.getInt("precision");
+		final int order = rs.getInt("columnOrder");
+
+ return new DBTableField(
+ columnName,
+ columnDesc == null ? "" : columnDesc,
+ isPrimaryKey, isNullable,
+ typeSQL, typeUniversal, order,
+ columnDefault == null ? "" : columnDefault,
+ length, precision, scale, isFixed
+ );
+
+ }
+
+ protected String getFieldType(ResultSet rs) throws SQLException {
+
+ final StringBuilder type = new StringBuilder();
+ final Integer max_length = rs.getInt("length");
+ final String mssqlType = rs.getString("mssqlType");
+ final boolean isNotNull = rs.getString("isNullable").equals("NO");
+
+ type.append(mssqlType);
+ if (!rs.wasNull()) {
+ type.append("("+max_length.toString()+")");
+ }
+ if (isNotNull){
+ type.append(" NOT NULL");
+ }
+
+ return type.toString();
+
+ }
+
+ public Map getIndexesWithPks(String schema, String nameTable) {
+ final Map indexes = new HashMap<>();
+ final String query =
+ " SELECT DB_NAME() AS databaseName,\n" +
+ " sc.name as schemaName, \n" +
+ " t.name AS tableName,\n" +
+ " col.name as columnName,\n" +
+ " si.name AS indexName,\n" +
+ " si.is_primary_key isPk," +
+ " si.index_id as indexId,\n" +
+ " si.type_desc as typeName, \n" +
+ " CASE si.index_id WHEN 0 THEN NULL\n" +
+ " ELSE \n" +
+ " CASE is_primary_key WHEN 1 THEN\n" +
+ " N'ALTER TABLE ' + QUOTENAME(sc.name) + N'.' + QUOTENAME(t.name) + N' ADD CONSTRAINT ' + QUOTENAME(si.name) + N' PRIMARY KEY ' +\n" +
+ " CASE WHEN si.index_id > 1 THEN N'NON' ELSE N'' END + N'CLUSTERED '\n" +
+ " ELSE N'CREATE ' + \n" +
+ " CASE WHEN si.is_unique = 1 then N'UNIQUE ' ELSE N'' END +\n" +
+ " CASE WHEN si.index_id > 1 THEN N'NON' ELSE N'' END + N'CLUSTERED ' +\n" +
+ " N'INDEX ' + QUOTENAME(si.name) + N' ON ' + QUOTENAME(sc.name) + N'.' + QUOTENAME(t.name) + N' '\n" +
+ " END +\n" +
+ " /* key def */ N'(' + key_definition + N')' +\n" +
+ " /* includes */ CASE WHEN include_definition IS NOT NULL THEN \n" +
+ " N' INCLUDE (' + include_definition + N')'\n" +
+ " ELSE N''\n" +
+ " END +\n" +
+ " /* filters */ CASE WHEN filter_definition IS NOT NULL THEN \n" +
+ " N' WHERE ' + filter_definition ELSE N''\n" +
+ " END +\n" +
+ " /* with clause - compression goes here */\n" +
+ " CASE WHEN row_compression_partition_list IS NOT NULL OR page_compression_partition_list IS NOT NULL \n" +
+ " THEN N' WITH (' +\n" +
+ " CASE WHEN row_compression_partition_list IS NOT NULL THEN\n" +
+ " N'DATA_COMPRESSION = ROW ' + CASE WHEN psc.name IS NULL THEN N'' ELSE + N' ON PARTITIONS (' + row_compression_partition_list + N')' END\n" +
+ " ELSE N'' END +\n" +
+ " CASE WHEN row_compression_partition_list IS NOT NULL AND page_compression_partition_list IS NOT NULL THEN N', ' ELSE N'' END +\n" +
+ " CASE WHEN page_compression_partition_list IS NOT NULL THEN\n" +
+ " N'DATA_COMPRESSION = PAGE ' + CASE WHEN psc.name IS NULL THEN N'' ELSE + N' ON PARTITIONS (' + page_compression_partition_list + N')' END\n" +
+ " ELSE N'' END\n" +
+ " + N')'\n" +
+ " ELSE N''\n" +
+ " END +\n" +
+ " ' ON ' + CASE WHEN psc.name is null \n" +
+ " THEN ISNULL(QUOTENAME(fg.name),N'')\n" +
+ " ELSE psc.name + N' (' + partitioning_column.column_name + N')' \n" +
+ " END\n" +
+ " + N';'\n" +
+ " END AS ddl,\n" +
+ " si.has_filter,\n" +
+ " si.is_unique,\n" +
+ " ISNULL(pf.name, NULL) AS partition_function,\n" +
+ " ISNULL(psc.name, fg.name) AS partition_scheme_or_filegroup\n" +
+ "FROM sys.indexes AS si \n" +
+ "JOIN sys.index_columns ic ON si.object_id = ic.object_id and si.index_id = ic.index_id \n" +
+ "JOIN sys.columns col ON ic.object_id = col.object_id and ic.column_id = col.column_id \n" +
+ "JOIN sys.tables AS t ON si.object_id=t.object_id\n" +
+ "JOIN sys.schemas AS sc ON t.schema_id=sc.schema_id\n" +
+ "LEFT JOIN sys.dm_db_index_usage_stats AS stat ON \n" +
+ " stat.database_id = DB_ID() \n" +
+ " and si.object_id=stat.object_id \n" +
+ " and si.index_id=stat.index_id\n" +
+ "LEFT JOIN sys.partition_schemes AS psc ON si.data_space_id=psc.data_space_id\n" +
+ "LEFT JOIN sys.partition_functions AS pf ON psc.function_id=pf.function_id\n" +
+ "LEFT JOIN sys.filegroups AS fg ON si.data_space_id=fg.data_space_id\n" +
+ "OUTER APPLY ( SELECT STUFF (\n" +
+ " (SELECT N', ' + QUOTENAME(c.name) +\n" +
+ " CASE ic.is_descending_key WHEN 1 then N' DESC' ELSE N'' END\n" +
+ " FROM sys.index_columns AS ic \n" +
+ " JOIN sys.columns AS c ON \n" +
+ " ic.column_id=c.column_id \n" +
+ " and ic.object_id=c.object_id\n" +
+ " WHERE ic.object_id = si.object_id\n" +
+ " and ic.index_id=si.index_id\n" +
+ " and ic.key_ordinal > 0\n" +
+ " ORDER BY ic.key_ordinal FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'),1,2,'')) AS keys ( key_definition )\n" +
+ "OUTER APPLY (\n" +
+ " SELECT MAX(QUOTENAME(c.name)) AS column_name\n" +
+ " FROM sys.index_columns AS ic \n" +
+ " JOIN sys.columns AS c ON \n" +
+ " ic.column_id=c.column_id \n" +
+ " and ic.object_id=c.object_id\n" +
+ " WHERE ic.object_id = si.object_id\n" +
+ " and ic.index_id=si.index_id\n" +
+ " and ic.partition_ordinal = 1) AS partitioning_column\n" +
+ "OUTER APPLY ( SELECT STUFF (\n" +
+ " (SELECT N', ' + QUOTENAME(c.name)\n" +
+ " FROM sys.index_columns AS ic \n" +
+ " JOIN sys.columns AS c ON \n" +
+ " ic.column_id=c.column_id \n" +
+ " and ic.object_id=c.object_id\n" +
+ " WHERE ic.object_id = si.object_id\n" +
+ " and ic.index_id=si.index_id\n" +
+ " and ic.is_included_column = 1\n" +
+ " ORDER BY c.name FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'),1,2,'')) AS includes ( include_definition )\n" +
+ "OUTER APPLY ( SELECT STUFF (\n" +
+ " (SELECT N', ' + CAST(p.partition_number AS VARCHAR(32))\n" +
+ " FROM sys.partitions AS p\n" +
+ " WHERE p.object_id = si.object_id\n" +
+ " and p.index_id=si.index_id\n" +
+ " and p.data_compression = 1\n" +
+ " ORDER BY p.partition_number FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'),1,2,'')) AS row_compression_clause ( row_compression_partition_list )\n" +
+ "OUTER APPLY ( SELECT STUFF (\n" +
+ " (SELECT N', ' + CAST(p.partition_number AS VARCHAR(32))\n" +
+ " FROM sys.partitions AS p\n" +
+ " WHERE p.object_id = si.object_id\n" +
+ " and p.index_id=si.index_id\n" +
+ " and p.data_compression = 2\n" +
+ " ORDER BY p.partition_number FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'),1,2,'')) AS page_compression_clause ( page_compression_partition_list )\n" +
+ "WHERE si.type IN (1,2) /* clustered, nonclustered */\n" +
+// "AND si.is_primary_key = 0 /* no PKs */\n" +
+ "AND si.is_hypothetical = 0 /* bugged feature, always better to delete, no need to store and reconstruct them */\n" +
+ "AND upper(t.name) = upper('" + nameTable + "') AND upper(sc.name) = upper('" + schema + "')" +
+ "OPTION (RECOMPILE);";
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);){
+
+ while(rs.next()){
+ final String name = rs.getString("indexName");
+				final String owner = rs.getString("schemaName");
+ final String sql = rs.getString("ddl");
+ final DBIndex index = new DBIndex(name, new StringProperties(rs), schema, owner, Collections.emptySet(), sql);
+
+ indexes.put(index.getName(), index);
+ }
+
+ return indexes;
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "indexes").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ }
+
+ @Override
+ public Map getIndexes(String schema, String nameTable){
+ final Map indexes = getIndexesWithPks(schema, nameTable);
+
+ indexes.values().removeIf(x->x.getOptions().getChildren().get("ispk").getData().equals("1"));
+ return indexes;
+ }
+
+ @Override
+ public Map getConstraints(String schema, String nameTable) {
+ final Map constraints = new HashMap<>();
+ final ArrayList queries = new ArrayList<>();
+ //TODO [] in object names
+ //check
+ queries.add(
+ "SELECT sc.name as schemaName, t.name as tableName, col.name as columnName, c.name as constraintName, c.name as indexName, c.type_desc as constraintType, \n" +
+ "'ALTER TABLE ' + sc.name + '.' + t.name + ' ADD CONSTRAINT ' + c.name + ' CHECK ' + c.definition + ';' as ddl\n" +
+ "FROM sys.check_constraints c\n" +
+ "JOIN sys.tables t ON c.parent_object_id = t.object_id \n" +
+ "LEFT OUTER JOIN sys.columns col on col.column_id = c.parent_column_id AND col.object_id = c.parent_object_id\n" +
+ "JOIN sys.schemas AS sc ON t.schema_id=sc.schema_id \n" +
+ "WHERE t.name = :name AND sc.name = :schema");
+ //default
+ queries.add(
+ "SELECT sc.name AS schemaName, t.name AS tableName, col.name AS columnName, c.name AS constraintName, c.type_desc AS constraintType, \n" +
+ "'ALTER TABLE ' + sc.name + '.' + t.name + ' ADD CONSTRAINT ' + c.name+ ' DEFAULT ' \n" +
+ " + CASE WHEN ISNUMERIC(ic.COLUMN_DEFAULT) = 1 \n" +
+ " THEN TRY_CONVERT(nvarchar, TRY_CONVERT(numeric, ic.COLUMN_DEFAULT))\n" +
+ " ELSE '' + ic.COLUMN_DEFAULT + '' END\n" +
+ " + ' FOR [' + col.name + '];' AS ddl\n" +
+ "FROM sys.default_constraints c\n" +
+ "JOIN sys.tables t ON c.parent_object_id = t.object_id \n" +
+ "JOIN sys.columns col ON col.default_object_id = c.object_id\n" +
+ "JOIN sys.schemas AS sc ON t.schema_id=sc.schema_id \n" +
+ "JOIN INFORMATION_SCHEMA.COLUMNS ic on t.name = ic.TABLE_NAME AND col.name = ic.COLUMN_NAME \n" +
+ "WHERE t.name = :name AND sc.name = :schema\n"
+ );
+ //unique
+ queries.add(
+ "SELECT TC.TABLE_SCHEMA AS schemaName, TC.TABLE_NAME AS tableName, CC.Column_Name AS columnName, TC.Constraint_Name AS constraintName, TC.CONSTRAINT_TYPE AS constraintType,\n" +
+ "'ALTER TABLE ' + TC.TABLE_SCHEMA + '.' + TC.TABLE_NAME + ' ADD CONSTRAINT ' + TC.CONSTRAINT_NAME + ' UNIQUE NONCLUSTERED ([' + CC.COLUMN_NAME + ']);' AS ddl\n" +
+ "FROM INFORMATION_SCHEMA.table_constraints TC\n" +
+ "INNER JOIN INFORMATION_SCHEMA.constraint_column_usage CC on TC.Constraint_Name = CC.Constraint_Name\n" +
+ "WHERE TC.constraint_type = 'Unique' AND TC.TABLE_NAME = :name AND TC.TABLE_SCHEMA = :schema ---- PARAMETER 1,2\n"
+ );
+ //foreign
+ queries.add(
+ "SELECT ss.name as schemaName, t.name as tableName, sc.name as columnName, o.name as constraintName, o.type_desc as constraintType, refss.name as refSchemaName, refst.name as refTableName, refsc.name as refColumnName, " +
+ "'ALTER TABLE ' + ss.name + '.' + t.name + ' ADD CONSTRAINT ' + o.name + ' FOREIGN KEY ('+ sc.name + ') references ' + refss.name + '.' + refst.name + '(' + refsc.name + ');' as ddl\n" +
+ "FROM sys.foreign_key_columns c\n" +
+ "JOIN sys.objects o ON c.constraint_object_id = o.object_id\n" +
+ "LEFT OUTER JOIN sys.tables t on t.object_id = c.parent_object_id \n" +
+ "LEFT OUTER JOIN sys.schemas ss on ss.schema_id = o.schema_id \n" +
+ "LEFT OUTER JOIN sys.columns sc on sc.object_id = c.parent_object_id AND sc.column_id = c.parent_column_id\n" +
+ "LEFT OUTER JOIN sys.tables refst on refst.object_id = c.referenced_object_id\n" +
+ "LEFT OUTER JOIN sys.schemas refss on refss.schema_id = refst.schema_id\n" +
+ "LEFT OUTER JOIN sys.columns refsc on refsc.object_id = c.referenced_object_id AND refsc.column_id = c.referenced_column_id \n" +
+ "WHERE t.name = :name AND ss.name = :schema\n"
+ );
+
+
+ final Iterator it = queries.iterator();
+ while (it.hasNext()) {
+ final String query = it.next();
+ try (
+ PreparedStatement stmt = preparedStatement(getConnection(), query, ImmutableMap.of("name", nameTable, "schema" , schema));
+				ResultSet rs = stmt.executeQuery();
+ ){
+
+ while (rs.next()) {
+ final String name = rs.getString("constraintName");
+ final String type = rs.getString("constraintType");
+ final String sql = rs.getString("ddl");
+ final String owner = schema;
+
+ final DBConstraint con = new DBConstraint(name, new StringProperties(rs), schema, owner, Collections.emptySet(), sql, type);
+ constraints.put(con.getName(), con);
+ }
+ } catch (Exception ex){
+ final String msg = lang.getValue("errors", "adapter", "constraints").toString();
+ throw new ExceptionDBGitRunTime(msg, ex);
+ }
+ }
+
+ //primary keys
+ final Map indexes = getIndexesWithPks(schema, nameTable);
+ indexes.values().removeIf(x->x.getOptions().getChildren().get("ispk").getData().equals("0"));
+
+ for( DBIndex pki : indexes.values() ){
+ final String constraintType = pki.getOptions().getChildren().get("typename").getData();
+ final DBConstraint pkc = new DBConstraint(
+ pki.getName(),
+ pki.getOptions(),
+ pki.getSchema(),
+ pki.getOwner(),
+ new HashSet<>(pki.getDependencies()),
+ pki.getSql(),
+ constraintType
+ );
+ pkc.setOptions(pki.getOptions());
+ constraints.put(pkc.getName(), pkc);
+ }
+
+ return constraints;
+
+ }
+
+ @Override
+ public Map getViews(String schema) {
+ final Map listView = new HashMap();
+ final String query =
+ "SELECT \n" +
+ " sp.name as ownerName, sp.type_desc as ownerType, ss.name AS schemaName, sv.name AS viewName, sm.definition as ddl, \n" +
+ " sv.type_desc as typeName, sm.uses_ansi_nulls, sm.uses_quoted_identifier, sm.is_schema_bound, \n" +
+ " OBJECTPROPERTYEX(sv.object_id,'IsIndexable') AS IsIndexable,\n" +
+ " OBJECTPROPERTYEX(sv.object_id,'IsIndexed') AS IsIndexed\n" +
+ "FROM sys.views sv\n" +
+ "JOIN sys.schemas ss ON sv.schema_id = ss.schema_id\n" +
+ "LEFT OUTER JOIN sys.sql_modules sm on sv.object_id = sm.object_id\n" +
+			"LEFT OUTER JOIN sys.database_principals sp on sv.principal_id = sp.principal_id\n" + "WHERE ss.name = '" + schema + "'";
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);){
+
+ while(rs.next()){
+ final String name = rs.getString("viewName");
+ final String schemaName = rs.getString("schemaName");
+ final String owner = rs.getString("ownerName");
+ final String sql = rs.getString("ddl");
+
+ final DBView view = new DBView(name, new StringProperties(rs), schema, owner, Collections.emptySet(), sql);
+ listView.put(name, view);
+ }
+ return listView;
+
+ } catch(Exception e) {
+ final DBGitLang msg = lang.getValue("errors", "adapter", "views");
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ }
+
+ @Override
+ public DBView getView(String schema, String name) {
+ //TODO single-version query with ExceptionDBGitNotFound
+ try {
+ return getViews(schema).get(name);
+ } catch(Exception e) {
+ final DBGitLang msg = lang.getValue("errors", "adapter", "views");
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ }
+
+ @Override
+ public Map getPackages(String schema) {
+ // No such implementation in MSSQL
+ return Collections.emptyMap();
+ }
+
+ @Override
+ public DBPackage getPackage(String schema, String name) {
+ // No such implementation in MSSQL
+ throw new ExceptionDBGitRunTime(new ExceptionDBGitObjectNotFound("cannot get packages on mssql"));
+ }
+
+ @Override
+ public Map getProcedures(String schema) {
+ final Map listProcedure = new HashMap();
+ final String query =
+ "SELECT s.name schemaName, o.name procedureName, o.type_desc as typeName, definition ddl, USER_NAME(so.uid) AS owner \n" +
+ "FROM sys.sql_modules m\n" +
+ "JOIN sys.procedures p ON m.object_id = p.object_id\n" +
+ "JOIN sys.objects o \n" +
+ " ON o.object_id = p.object_id \n" +
+ " AND Left(o.name, 3) NOT IN ('sp_', 'xp_', 'ms_') \n" +
+ "JOIN sys.schemas s ON s.schema_id = o.schema_id\n" +
+ "JOIN sysobjects so on o.object_id = so.id\n" +
+ "WHERE s.name = '" + schema + "'\n";
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);){
+ while(rs.next()){
+ final String name = rs.getString("procedureName");
+ final String owner = rs.getString("owner");
+ final String sql = rs.getString("ddl");
+ final StringProperties options = new StringProperties(rs);
+ final DBProcedure proc = new DBProcedure(name, options, schema, owner, Collections.emptySet(), sql);
+ listProcedure.put(name, proc);
+ }
+ }catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "prc").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ return listProcedure;
+ }
+
+ @Override
+ public DBProcedure getProcedure(String schema, String name) {
+ final String query =
+ "SELECT s.name schemaName, o.name procedureName, o.type_desc as typeName, definition ddl, USER_NAME(so.uid) AS owner \n" +
+ "FROM sys.sql_modules m\n" +
+ "JOIN sys.procedures p ON m.object_id = p.object_id\n" +
+ "JOIN sys.objects o \n" +
+ " ON o.object_id = p.object_id \n" +
+ " AND Left(o.name, 3) NOT IN ('sp_', 'xp_', 'ms_') -- filter out system ones\n" +
+ "JOIN sys.schemas s ON s.schema_id = o.schema_id\n" +
+ "JOIN sysobjects so on o.object_id = so.id \n" +
+ "WHERE s.name = '" + schema + "' AND o.name = '" + name + "'";
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);){
+
+ if (!rs.next()) throw new ExceptionDBGitObjectNotFound("");
+
+ final String owner = rs.getString("owner");
+ final String procedureName = rs.getString("procedureName");
+ final String sql = rs.getString("ddl");
+ final StringProperties options = new StringProperties(rs);
+
+ return new DBProcedure(procedureName, options, schema, owner, Collections.emptySet(), sql);
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "prc").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ }
+
+ @Override
+ public Map getFunctions(String schema) {
+ final Map listFunction = new HashMap<>();
+ final String query =
+ "SELECT ss.name schemaName, o.name functionName, type_desc typeName, definition ddl, USER_NAME(so.uid) owner \n" +
+ "FROM sys.sql_modules m \n" +
+ "INNER JOIN sys.objects o ON m.object_id = o.object_id\n" +
+ "INNER JOIN sysobjects so ON m.object_id = so.id\n" +
+ "INNER JOIN sys.schemas ss ON ss.schema_id = o.schema_id\n" +
+ "WHERE type_desc like '%function%' AND ss.name = '" + schema + "'\n";
+
+ try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query);){
+
+ while(rs.next()){
+
+ final String name = rs.getString("functionName");
+ final String owner = rs.getString("owner");
+ final String sql = rs.getString("ddl");
+ final StringProperties options = new StringProperties(rs);
+
+ final DBFunction func = new DBFunction(name, options, schema, owner, Collections.emptySet(), sql);
+ listFunction.put(name, func);
+ }
+
+ } catch(Exception e) {
+ final String msg = lang.getValue("errors", "adapter", "fnc").toString();
+ throw new ExceptionDBGitRunTime(msg, e);
+ }
+ return listFunction;
+ }
+
+	/**
+	 * Loads a single user defined function by schema and name.
+	 *
+	 * @param schema schema the function belongs to
+	 * @param name   function name
+	 * @return the DBFunction with its full definition
+	 * @throws ExceptionDBGitRunTime wrapping ExceptionDBGitObjectNotFound when no row matches,
+	 *                               or any other database error
+	 */
+	@Override
+	public DBFunction getFunction(String schema, String name) {
+		// Escape single quotes: both values are embedded in SQL string literals.
+		final String safeSchema = schema.replace("'", "''");
+		final String safeName = name.replace("'", "''");
+		final String query =
+			"SELECT ss.name schemaName, o.name functionName, type_desc typeName, definition ddl, USER_NAME(so.uid) owner \n" +
+			"FROM sys.sql_modules m \n" +
+			"INNER JOIN sys.objects o ON m.object_id = o.object_id\n" +
+			"INNER JOIN sysobjects so ON m.object_id = so.id\n" +
+			"INNER JOIN sys.schemas ss ON ss.schema_id = o.schema_id\n" +
+			"WHERE type_desc like '%function%' AND ss.name = '" + safeSchema + "' AND o.name = '" + safeName + "'\n";
+
+		try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query)) {
+
+			if (!rs.next()) throw new ExceptionDBGitObjectNotFound("");
+
+			final String functionName = rs.getString("functionName");
+			final String owner = rs.getString("owner");
+			final String sql = rs.getString("ddl");
+			final StringProperties options = new StringProperties(rs);
+
+			return new DBFunction(functionName, options, schema, owner, Collections.emptySet(), sql);
+
+		} catch (Exception e) {
+			final String msg = lang.getValue("errors", "adapter", "fnc").toString();
+			throw new ExceptionDBGitRunTime(msg, e);
+		}
+	}
+
+	//TODO Discuss scenario when we get an encrypted TRIGGER, IMO display a warning,
+	// it is not possible to get definition of an encrypted trigger
+	// (the query already selects OBJECTPROPERTY(id, 'IsEncrypted') but the flag is not acted upon yet)
+
+	/**
+	 * Collects all triggers defined on tables of the given schema.
+	 *
+	 * @param schema schema name to scan
+	 * @return map of trigger name to DBTrigger (raw Map to match the adapter interface)
+	 * @throws ExceptionDBGitRunTime on any database error (original cause preserved)
+	 */
+	public Map getTriggers(String schema) {
+		final Map listTrigger = new HashMap();
+		// Escape single quotes: the schema name is embedded in a SQL string literal.
+		final String safeSchema = schema.replace("'", "''");
+		final String query =
+			"SELECT \n" +
+			"	s.name schemaName, \n" +
+			"	o.name triggerName, \n" +
+			"	USER_NAME(o.uid) owner, \n" +
+			"	OBJECT_NAME(parent_obj) tableName, \n" +
+			"	m.definition as ddl, \n" +
+			"	OBJECTPROPERTY(id, 'IsEncrypted') AS encrypted \n" +
+			"FROM sysobjects o\n" +
+			"INNER JOIN sys.tables t ON o.parent_obj = t.object_id \n" +
+			"INNER JOIN sys.schemas s ON t.schema_id = s.schema_id \n" +
+			"INNER JOIN sys.sql_modules m ON m.object_id = o.id\n" +
+			"WHERE o.type = 'TR' AND s.name = '" + safeSchema + "'\n";
+
+		try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query)) {
+
+			while (rs.next()) {
+				final String name = rs.getString("triggerName");
+				final String owner = rs.getString("owner");
+				final String sql = rs.getString("ddl");
+				final StringProperties options = new StringProperties(rs);
+
+				DBTrigger trigger = new DBTrigger(name, options, schema, owner, Collections.emptySet(), sql);
+				listTrigger.put(name, trigger);
+			}
+			return listTrigger;
+
+		} catch (Exception e) {
+			final String msg = lang.getValue("errors", "adapter", "triggers").toString();
+			throw new ExceptionDBGitRunTime(msg, e);
+		}
+	}
+
+	/**
+	 * Loads a single trigger by schema and name.
+	 *
+	 * @param schema schema the parent table belongs to
+	 * @param name   trigger name
+	 * @return the DBTrigger with its full definition
+	 * @throws ExceptionDBGitRunTime wrapping ExceptionDBGitObjectNotFound when no row matches,
+	 *                               or any other database error
+	 */
+	public DBTrigger getTrigger(String schema, String name) {
+		// Escape single quotes: both values are embedded in SQL string literals.
+		final String safeSchema = schema.replace("'", "''");
+		final String safeName = name.replace("'", "''");
+		final String query =
+			"SELECT \n" +
+			"	s.name schemaName, \n" +
+			"	o.name triggerName, \n" +
+			"	USER_NAME(o.uid) owner, \n" +
+			"	OBJECT_NAME(parent_obj) tableName, \n" +
+			"	m.definition as ddl, \n" +
+			"	OBJECTPROPERTY(id, 'IsEncrypted') AS encrypted \n" +
+			"FROM sysobjects o\n" +
+			"INNER JOIN sys.tables t ON o.parent_obj = t.object_id \n" +
+			"INNER JOIN sys.schemas s ON t.schema_id = s.schema_id \n" +
+			"INNER JOIN sys.sql_modules m ON m.object_id = o.id\n" +
+			"WHERE o.type = 'TR' AND s.name = '" + safeSchema + "' AND o.name = '" + safeName + "'\n";
+
+		try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query)) {
+
+			if (!rs.next()) throw new ExceptionDBGitObjectNotFound("");
+
+			// Use the name reported by the database (was fetched but unused before;
+			// it may differ from the argument in case only).
+			final String tname = rs.getString("triggerName");
+			final String owner = rs.getString("owner");
+			final String sql = rs.getString("ddl");
+			final StringProperties options = new StringProperties(rs);
+
+			return new DBTrigger(tname, options, schema, owner, Collections.emptySet(), sql);
+
+		} catch (Exception e) {
+			final String msg = lang.getValue("errors", "adapter", "triggers").toString();
+			throw new ExceptionDBGitRunTime(msg, e);
+		}
+	}
+
+	/**
+	 * Reads the full content of a table.
+	 * When LIMIT_FETCH is enabled, the row count (estimated from sys.partitions) is checked
+	 * first; if it exceeds MAX_ROW_COUNT_FETCH a DBTableData with ERROR_LIMIT_ROWS status is
+	 * returned so the caller can fall back to portioned fetching.
+	 *
+	 * @param schema    schema of the table
+	 * @param nameTable table name
+	 * @return DBTableData wrapping the SELECT, or an ERROR_LIMIT_ROWS marker
+	 * @throws ExceptionDBGitRunTime on any database error (original cause preserved)
+	 */
+	@Override
+	public DBTableData getTableData(String schema, String nameTable) {
+		// Identifiers are bracket-quoted, so reserved-word table names are safe here.
+		final String dataQuery = "SELECT * FROM [" + schema + "].[" + nameTable + "]";
+
+		final int maxRowsCount = DBGitConfig.getInstance().getInteger(
+			"core", "MAX_ROW_COUNT_FETCH",
+			DBGitConfig.getInstance().getIntegerGlobal("core", "MAX_ROW_COUNT_FETCH", MAX_ROW_COUNT_FETCH)
+		);
+
+		final boolean isLimitedFetch = DBGitConfig.getInstance().getBoolean(
+			"core", "LIMIT_FETCH",
+			DBGitConfig.getInstance().getBooleanGlobal("core", "LIMIT_FETCH", true)
+		);
+
+		try {
+
+			if (isLimitedFetch) {
+				// Escape single quotes: these names appear inside string literals of the count query.
+				final String safeSchema = schema.replace("'", "''");
+				final String safeTable = nameTable.replace("'", "''");
+
+				// sys.partitions provides a fast row count without scanning the table
+				// (IDX.index_id < 2 restricts to the heap or the clustered index).
+				final String rowsCountQuery =
+					"SELECT COALESCE(SUM(PART.rows), 0) AS rowsCount\n" +
+					"FROM sys.tables TBL\n" +
+					"INNER JOIN sys.partitions PART ON TBL.object_id = PART.object_id\n" +
+					"INNER JOIN sys.indexes IDX ON PART.object_id = IDX.object_id AND PART.index_id = IDX.index_id\n" +
+					"INNER JOIN sys.schemas S ON S.schema_id = TBL.schema_id\n" +
+					"WHERE TBL.name = '" + safeTable + "' AND S.name = '" + safeSchema + "' AND IDX.index_id < 2\n" +
+					"GROUP BY TBL.object_id, TBL.name";
+
+				try (
+					Statement st = getConnection().createStatement();
+					ResultSet rs = st.executeQuery(rowsCountQuery)
+				) {
+					if (!rs.next()) throw new ExceptionDBGitRunTime("rows count result set is empty");
+					if (rs.getInt("rowsCount") > maxRowsCount) {
+						return new DBTableData(DBTableData.ERROR_LIMIT_ROWS);
+					}
+				}
+
+			}
+
+			return new DBTableData(getConnection(), dataQuery);
+
+		} catch (Exception e) {
+			final String msg = DBGitLang.getInstance().getValue("errors", "adapter", "tableData").toString();
+			throw new ExceptionDBGitRunTime(msg, e);
+		}
+
+	}
+
+	/**
+	 * Reads one portion (page) of table data using OFFSET/FETCH paging
+	 * (requires SQL Server 2012+; older servers would need a ROW_NUMBER() based query).
+	 * On failure the method waits TRY_DELAY seconds and retries itself up to TRY_COUNT times.
+	 *
+	 * @param schema       schema of the table
+	 * @param nameTable    table name
+	 * @param portionIndex zero-based page index; offset = PORTION_SIZE * portionIndex
+	 * @param tryNumber    current attempt number (callers normally pass 0/1)
+	 * @return DBTableData wrapping the paged SELECT
+	 * @throws ExceptionDBGitRunTime when all retries are exhausted or the wait is interrupted
+	 */
+	@Override
+	public DBTableData getTableDataPortion(String schema, String nameTable, int portionIndex, int tryNumber) {
+
+		final int portionSize = DBGitConfig.getInstance().getInteger("core", "PORTION_SIZE",
+			DBGitConfig.getInstance().getIntegerGlobal("core", "PORTION_SIZE", 1000)
+		);
+
+		final int dataOffset = portionSize * portionIndex;
+		// Bracket-quote identifiers for consistency with getTableData and to
+		// survive reserved-word or special-character object names.
+		final String dataQuery =
+			"SELECT * " +
+			"FROM [" + schema + "].[" + nameTable + "] " +
+			"ORDER BY (SELECT NULL) " +
+			"OFFSET " + dataOffset + " ROWS " +
+			"FETCH NEXT " + portionSize + " ROWS ONLY ";
+
+		try {
+
+			return new DBTableData(getConnection(), dataQuery);
+
+		} catch (Exception e) {
+
+			final int maxTriesCount = DBGitConfig.getInstance().getInteger("core", "TRY_COUNT", DBGitConfig.getInstance().getIntegerGlobal("core", "TRY_COUNT", 1000));
+			final int tryDelay = DBGitConfig.getInstance().getInteger("core", "TRY_DELAY", DBGitConfig.getInstance().getIntegerGlobal("core", "TRY_DELAY", 1000));
+
+			ConsoleWriter.println(e.getLocalizedMessage(), messageLevel);
+			ConsoleWriter.detailsPrintln(ExceptionUtils.getStackTrace(e), messageLevel);
+			logger.error(lang.getValue("errors", "adapter", "tableData").toString(), e);
+
+			if (tryNumber <= maxTriesCount) {
+
+				final String waitMessage = DBGitLang.getInstance()
+					.getValue("errors", "dataTable", "wait")
+					.withParams(String.valueOf(tryDelay));
+
+				final String tryAgainMessage = DBGitLang.getInstance()
+					.getValue("errors", "dataTable", "tryAgain")
+					.withParams(String.valueOf(tryNumber));
+
+				ConsoleWriter.println(waitMessage, messageLevel);
+				try {
+					TimeUnit.SECONDS.sleep(tryDelay);
+				} catch (InterruptedException e1) {
+					// Restore the interrupt flag so callers up the stack can observe it.
+					Thread.currentThread().interrupt();
+					throw new ExceptionDBGitRunTime(e1.getMessage());
+				}
+
+				ConsoleWriter.println(tryAgainMessage, messageLevel);
+				// BUGFIX: was "tryNumber++" (post-increment), which passed the UNCHANGED
+				// value to the recursive call and made the retry counter never advance.
+				return getTableDataPortion(schema, nameTable, portionIndex, tryNumber + 1);
+
+			} else {
+				final String msg = DBGitLang.getInstance().getValue("errors", "adapter", "tableData").toString();
+				throw new ExceptionDBGitRunTime(msg, e);
+			}
+		}
+	}
+
+	/**
+	 * Collects SQL users of the current database together with a reconstructed DDL
+	 * (CREATE LOGIN ... WITH PASSWORD ... HASHED; GRANT CONNECT; CREATE USER ...).
+	 * System users ('dbo', 'guest', '##MS%') are excluded.
+	 *
+	 * NOTE(review): reading LOGINPROPERTY(...,'PASSWORDHASH') presumably requires elevated
+	 * server permissions — confirm behavior for low-privilege connections.
+	 *
+	 * @return map of user name to DBUser (raw Map to match the adapter interface)
+	 * @throws ExceptionDBGitRunTime on any database error
+	 */
+	@Override
+	public Map getUsers() {
+		final Map listUser = new HashMap();
+		final String query =
+			"DECLARE @crlf VARCHAR(2)\n" +
+			"SELECT \n" +
+			"	u.name userName, sp.name loginName, sp.default_database_name databaseName, dp.default_schema_name as schemaName,\n" +
+			"	CASE WHEN sp.is_disabled IS NULL THEN 1 ELSE sp.is_disabled END isDisabledLogin,\n" +
+			"	CASE WHEN sp.name IS NOT NULL THEN 'CREATE LOGIN [' + sp.name + '] WITH PASSWORD = ' \n" +
+			"	+ UPPER(master.dbo.fn_varbintohexstr (CAST(LOGINPROPERTY(sp.name,'PASSWORDHASH') as VARBINARY (256)))) + ' HASHED; ' ELSE '' END \n" +
+			"	+ CASE WHEN sp.is_disabled IS NOT NULL AND sp.is_disabled = 0 AND dr.permission_name IS NOT NULL THEN 'GRANT CONNECT SQL TO [' + sp.name + ']; ' ELSE '' END \n" +
+			"	+ 'CREATE USER [' + u.name + '] ' \n" +
+			"	+ CASE WHEN sp.name IS NOT NULL THEN 'FOR LOGIN [' + sp.name + ']' ELSE '' END\n" +
+			"	+ CASE WHEN dp.default_schema_name IS NOT NULL THEN ' WITH DEFAULT_SCHEMA = [' + dp.default_schema_name + ']' ELSE '' END + ';' \n" +
+			"	AS ddl, \n" +
+			"	UPPER(master.dbo.fn_varbintohexstr (CAST(LOGINPROPERTY(sp.name,'PASSWORDHASH') as VARBINARY (256)))) passwordHash\n" +
+			"FROM sys.sysusers u \n" +
+			"INNER JOIN sys.database_principals dp ON dp.sid = u.sid\n" +
+			"LEFT OUTER JOIN sys.server_principals sp ON sp.sid = u.sid\n" +
+			"LEFT OUTER JOIN sys.database_permissions dr ON dr.grantee_principal_id = dp.principal_id AND dr.permission_name = 'CONNECT'\n" +
+			"WHERE dp.type_desc = 'SQL_USER' AND u.name NOT IN ('dbo','guest') AND u.name NOT LIKE '##MS%'";
+
+		try (
+			Statement stmt = getConnection().createStatement();
+			ResultSet rs = stmt.executeQuery(query);
+		){
+
+			while(rs.next()){
+				// Column 1 is userName; the remaining columns are captured wholesale as options.
+				final String name = rs.getString(1);
+				final StringProperties options = new StringProperties(rs);
+
+				DBUser user = new DBUser(name, options);
+				listUser.put(name, user);
+			}
+
+		} catch(Exception e) {
+			final DBGitLang msg = lang.getValue("errors", "adapter", "users");
+			throw new ExceptionDBGitRunTime(msg, e);
+		}
+
+		return listUser;
+	}
+
+	/**
+	 * Collects database roles with a reconstructed DDL (role creation, its permission
+	 * GRANT/DENY/REVOKE statements and sp_AddRoleMember calls for its members).
+	 *
+	 * Strategy: two temporary helper scalar functions (dbo.GetRoleDDL and
+	 * dbo.GetRoleMembersDDL) are created first, then the role query both uses them and
+	 * DROPs them in its own batch — see the DROP FUNCTION lines at the end of usersQuery.
+	 *
+	 * NOTE(review): creating helper functions requires DDL rights in the target database;
+	 * if the role query throws after setup, the helpers are left behind — confirm whether
+	 * that is acceptable.
+	 *
+	 * @return map of role name to DBRole (raw Map to match the adapter interface)
+	 * @throws ExceptionDBGitRunTime on any database error
+	 */
+	@Override
+	public Map getRoles() {
+		final Map listRole = new HashMap();
+		// Fixed roles and 'public' get only their membership DDL; custom roles also
+		// get the CREATE ROLE + permissions script.
+		final String usersQuery =
+			"SELECT \n" +
+			"	dp.name roleName, dp.is_fixed_role isFixedRole,\n" +
+			"	CASE WHEN dp.is_fixed_role = 0 AND dp.name != 'public' THEN dbo.GetRoleDDL(dp.name) ELSE '' " +
+			"	END roleDDL,\n" +
+			"	dbo.GetRoleMembersDDL(dp.name) membersDDL,\n" +
+			"	CASE WHEN dp.is_fixed_role = 1 OR dp.name = 'public'" +
+			"	 THEN dbo.GetRoleMembersDDL(dp.name) " +
+			"	 ELSE dbo.GetRoleDDL(dp.name) + dbo.GetRoleMembersDDL(dp.name) " +
+			"	END ddl\n" +
+			"FROM sys.database_principals dp\n" +
+			"WHERE dp.type = 'R'\n" +
+			"DROP FUNCTION GetRoleDDL\n" +
+			"DROP FUNCTION GetRoleMembersDDL";
+
+		// Executed in order: drop stale helpers if present, then (re)create them.
+		List setupQueries = Arrays.asList(
+			"IF OBJECT_ID(N'GetRoleDDL', N'FN') IS NOT NULL DROP FUNCTION GetRoleDDL\n" +
+			"IF OBJECT_ID(N'GetRoleMembersDDL', N'FN') IS NOT NULL DROP FUNCTION GetRoleMembersDDL\n"
+			,
+			"CREATE FUNCTION dbo.GetRoleDDL(@roleName VARCHAR(255))\n" +
+			"RETURNS VARCHAR(MAX)\n" +
+			"BEGIN\n" +
+			"	-- Script out the Role\n" +
+			"	DECLARE @roleDesc VARCHAR(MAX)\n" +
+			"	SET @roleDesc = 'CREATE ROLE [' + @roleName + '];'\n" +
+			"	DECLARE @rolePerm VARCHAR(MAX)\n" +
+			"	SET @rolePerm = ''\n" +
+			"	SELECT @rolePerm = @rolePerm +\n" +
+			"		CASE dp.state\n" +
+			"			WHEN 'D' THEN 'DENY '\n" +
+			"			WHEN 'G' THEN 'GRANT '\n" +
+			"			WHEN 'R' THEN 'REVOKE '\n" +
+			"			WHEN 'W' THEN 'GRANT '\n" +
+			"		END + \n" +
+			"		dp.permission_name + ' ' +\n" +
+			"		CASE dp.class\n" +
+			"			WHEN 0 THEN ''\n" +
+			"			WHEN 1 THEN --table or column subset on the table\n" +
+			"				CASE WHEN dp.major_id < 0 THEN\n" +
+			"					+ 'ON [sys].[' + OBJECT_NAME(dp.major_id) + '] '\n" +
+			"				ELSE\n" +
+			"					+ 'ON [' +\n" +
+			"					(SELECT SCHEMA_NAME(schema_id) + '].[' + name FROM sys.objects WHERE object_id = dp.major_id)\n" +
+			"					+ -- optionally concatenate column names\n" +
+			"					CASE WHEN MAX(dp.minor_id) > 0 \n" +
+			"						 THEN '] ([' + REPLACE(\n" +
+			"								(SELECT name + '], [' \n" +
+			"								 FROM sys.columns \n" +
+			"								 WHERE object_id = dp.major_id \n" +
+			"									AND column_id IN (SELECT minor_id \n" +
+			"													  FROM sys.database_permissions \n" +
+			"													  WHERE major_id = dp.major_id\n" +
+			"														AND USER_NAME(grantee_principal_id) IN (@roleName)\n" +
+			"													 )\n" +
+			"								 FOR XML PATH('')\n" +
+			"								) --replace final square bracket pair\n" +
+			"							+ '])', ', []', '')\n" +
+			"						 ELSE ']'\n" +
+			"					END + ' '\n" +
+			"				END\n" +
+			"			WHEN 3 THEN 'ON SCHEMA::[' + SCHEMA_NAME(dp.major_id) + '] '\n" +
+			"			WHEN 4 THEN 'ON ' + (SELECT RIGHT(type_desc, 4) + '::[' + name FROM sys.database_principals WHERE principal_id = dp.major_id) + '] '\n" +
+			"			WHEN 5 THEN 'ON ASSEMBLY::[' + (SELECT name FROM sys.assemblies WHERE assembly_id = dp.major_id) + '] '\n" +
+			"			WHEN 6 THEN 'ON TYPE::[' + (SELECT name FROM sys.types WHERE user_type_id = dp.major_id) + '] '\n" +
+			"			WHEN 10 THEN 'ON XML SCHEMA COLLECTION::[' + (SELECT SCHEMA_NAME(schema_id) + '.' + name FROM sys.xml_schema_collections WHERE xml_collection_id = dp.major_id) + '] '\n" +
+			"			WHEN 15 THEN 'ON MESSAGE TYPE::[' + (SELECT name FROM sys.service_message_types WHERE message_type_id = dp.major_id) + '] '\n" +
+			"			WHEN 16 THEN 'ON CONTRACT::[' + (SELECT name FROM sys.service_contracts WHERE service_contract_id = dp.major_id) + '] '\n" +
+			"			WHEN 17 THEN 'ON SERVICE::[' + (SELECT name FROM sys.services WHERE service_id = dp.major_id) + '] '\n" +
+			"			WHEN 18 THEN 'ON REMOTE SERVICE BINDING::[' + (SELECT name FROM sys.remote_service_bindings WHERE remote_service_binding_id = dp.major_id) + '] '\n" +
+			"			WHEN 19 THEN 'ON ROUTE::[' + (SELECT name FROM sys.routes WHERE route_id = dp.major_id) + '] '\n" +
+			"			WHEN 23 THEN 'ON FULLTEXT CATALOG::[' + (SELECT name FROM sys.fulltext_catalogs WHERE fulltext_catalog_id = dp.major_id) + '] '\n" +
+			"			WHEN 24 THEN 'ON SYMMETRIC KEY::[' + (SELECT name FROM sys.symmetric_keys WHERE symmetric_key_id = dp.major_id) + '] '\n" +
+			"			WHEN 25 THEN 'ON CERTIFICATE::[' + (SELECT name FROM sys.certificates WHERE certificate_id = dp.major_id) + '] '\n" +
+			"			WHEN 26 THEN 'ON ASYMMETRIC KEY::[' + (SELECT name FROM sys.asymmetric_keys WHERE asymmetric_key_id = dp.major_id) + '] '\n" +
+			"		END COLLATE SQL_Latin1_General_CP1_CI_AS\n" +
+			"		+ 'TO [' + @roleName + ']' + \n" +
+			"		CASE dp.state WHEN 'W' THEN ' WITH GRANT OPTION' ELSE '' END + ';'\n" +
+			"	FROM sys.database_permissions dp\n" +
+			"	WHERE USER_NAME(dp.grantee_principal_id) IN (@roleName)\n" +
+			"	GROUP BY dp.state, dp.major_id, dp.permission_name, dp.class\n" +
+			"	SELECT @roleDesc = @roleDesc + CASE WHEN @rolePerm IS NOT NULL THEN @rolePerm ELSE '' END\n" +
+			"	RETURN @roleDesc\n" +
+			"END \n"
+			,
+			"CREATE FUNCTION dbo.GetRoleMembersDDL(@roleName VARCHAR(255))\n" +
+			"RETURNS VARCHAR(MAX)\n" +
+			"BEGIN\n" +
+			"	-- Script out the Role\n" +
+			"	DECLARE @roleDesc VARCHAR(MAX)\n" +
+			"	SET @roleDesc = ''\n" +
+			"	-- Display users within Role.  Code stubbed by Joe Spivey\n" +
+			"	SELECT	@roleDesc = @roleDesc + 'EXECUTE sp_AddRoleMember ''' + roles.name + ''', ''' + users.name + ''';' \n" +
+			"	FROM sys.database_principals users\n" +
+			"	INNER JOIN sys.database_role_members link \n" +
+			"		ON link.member_principal_id = users.principal_id\n" +
+			"	INNER JOIN sys.database_principals roles \n" +
+			"		ON roles.principal_id = link.role_principal_id\n" +
+			"	WHERE roles.name = @roleName\n" +
+			"	RETURN @roleDesc\n" +
+			"END \n"
+		);
+
+		try (Statement stmt = getConnection().createStatement();) {
+
+			for(String expr : setupQueries){
+				stmt.execute(expr);
+			}
+
+			try(ResultSet rs = stmt.executeQuery(usersQuery);){
+				while(rs.next()){
+					final String name = rs.getString("rolename");
+					final StringProperties options = new StringProperties(rs);
+
+					DBRole role = new DBRole(name, options);
+					listRole.put(name, role);
+				}
+			}
+
+		}catch(Exception e) {
+			final DBGitLang msg = lang.getValue("errors", "adapter", "roles");
+			throw new ExceptionDBGitRunTime(msg, e);
+		}
+		return listRole;
+	}
+
+	/** User defined types are not supported by this adapter yet; always returns an empty map. */
+	@Override
+	public Map getUDTs(String schema) {
+		return Collections.emptyMap();
+	}
+
+	/** Domains are not supported by this adapter yet; always returns an empty map. */
+	@Override
+	public Map getDomains(String schema) {
+		return Collections.emptyMap();
+	}
+
+	/** Enums are not supported by this adapter yet; always returns an empty map. */
+	@Override
+	public Map getEnums(String schema) {
+		return Collections.emptyMap();
+	}
+
+	/** UDT lookup is unsupported; always throws ExceptionDBGitObjectNotFound. */
+	@Override
+	public DBUserDefinedType getUDT(String schema, String name) {
+		final String msg = lang.getValue("errors", "adapter", "objectNotFoundInDb").toString();
+		throw new ExceptionDBGitObjectNotFound(msg);
+	}
+
+	/** Domain lookup is unsupported; always throws ExceptionDBGitObjectNotFound. */
+	@Override
+	public DBDomain getDomain(String schema, String name) {
+		final String msg = lang.getValue("errors", "adapter", "objectNotFoundInDb").toString();
+		throw new ExceptionDBGitObjectNotFound(msg);
+	}
+
+	/** Enum lookup is unsupported; always throws ExceptionDBGitObjectNotFound. */
+	@Override
+	public DBEnum getEnum(String schema, String name) {
+		final String msg = lang.getValue("errors", "adapter", "objectNotFoundInDb").toString();
+		throw new ExceptionDBGitObjectNotFound(msg);
+	}
+
+	/**
+	 * Checks whether the current connection has any DEFINITION permission on the database
+	 * (needed to read DDL of objects owned by other users).
+	 *
+	 * @return true when fn_my_permissions reports a permission containing 'DEFINITION'
+	 * @throws ExceptionDBGitRunTime on any database error (original cause preserved)
+	 */
+	@Override
+	public boolean userHasRightsToGetDdlOfOtherUsers() {
+		final String query =
+			"SELECT CASE WHEN EXISTS " +
+			"(SELECT * FROM fn_my_permissions(NULL, 'DATABASE') WHERE permission_name LIKE '%DEFINITION%') " +
+			"THEN 1 ELSE 0 END hasRights;";
+
+		// try-with-resources: the original leaked the Statement/ResultSet on exception.
+		try (Statement stmt = getConnection().createStatement(); ResultSet rs = stmt.executeQuery(query)) {
+			rs.next();
+			return rs.getBoolean(1);
+		} catch (Exception e) {
+			logger.error(lang.getValue("errors", "adapter", "roles") + ": " + e.getMessage());
+			throw new ExceptionDBGitRunTime(lang.getValue("errors", "adapter", "roles") + ": " + e.getMessage(), e);
+		}
+	}
+
+	/** @return factory producing backup adapters for this MSSQL adapter */
+	@Override
+	public IFactoryDBBackupAdapter getBackupAdapterFactory() {
+		return backupFactory;
+	}
+
+	/** @return factory producing convert adapters for this MSSQL adapter */
+	@Override
+	public IFactoryDBConvertAdapter getConvertAdapterFactory() {
+		return convertFactory;
+	}
+
+	/** @return always DbType.MSSQL — this adapter targets Microsoft SQL Server */
+	@Override
+	public DbType getDbType() {
+		return DbType.MSSQL;
+	}
+
+	/**
+	 * Returns the server product version prefix, e.g. "8.00", "9.00", "10.0", "10.5"
+	 * for SQL 2000, 2005, 2008 and 2008R2 respectively (first 4 characters of
+	 * serverproperty('productversion')).
+	 *
+	 * @return the version prefix, or an empty string when the query fails (best effort)
+	 */
+	@Override
+	public String getDbVersion() {
+		final String query = "SELECT left(cast(serverproperty('productversion') as varchar), 4)";
+
+		// try-with-resources: the original leaked the Statement/ResultSet when the query threw.
+		try (Statement stmt = getConnection().createStatement(); ResultSet resultSet = stmt.executeQuery(query)) {
+			resultSet.next();
+			return resultSet.getString(1);
+		} catch (SQLException e) {
+			// Version lookup is non-critical; keep the original best-effort fallback.
+			return "";
+		}
+	}
+
+	/**
+	 * Creates the schema when it is not present yet.
+	 * The Java-side check is backed by an IF NOT EXISTS guard in the SQL batch itself.
+	 *
+	 * @param schemaName schema to ensure
+	 * @throws ExceptionDBGit on any database error
+	 */
+	@Override
+	public void createSchemaIfNeed(String schemaName) throws ExceptionDBGit {
+		try {
+			if (!getSchemes().containsKey(schemaName)) {
+				StatementLogging stLog = new StatementLogging(connect, getStreamOutputSqlCommand(), isExecSql());
+				try {
+					stLog.execute(
+						"IF NOT EXISTS ( SELECT * FROM sys.schemas WHERE name = N'" + schemaName + "' )\n" +
+						"EXEC('CREATE SCHEMA ["+schemaName+"]');"
+					);
+				} finally {
+					// Close even when execute throws (the original leaked the statement on failure).
+					stLog.close();
+				}
+			}
+		} catch (SQLException e) {
+			throw new ExceptionDBGit(lang.getValue("errors", "adapter", "createSchema") + ": " + e.getLocalizedMessage());
+		}
+
+	}
+
+	/**
+	 * Creates the database role when it is not present yet.
+	 *
+	 * @param roleName role to ensure
+	 * @throws ExceptionDBGit on any database error
+	 */
+	@Override
+	public void createRoleIfNeed(String roleName) throws ExceptionDBGit {
+		try {
+			if (!getRoles().containsKey(roleName)) {
+				StatementLogging stLog = new StatementLogging(connect, getStreamOutputSqlCommand(), isExecSql());
+				try {
+					// BUGFIX: the guard previously queried sys.schemas (copy-paste from
+					// createSchemaIfNeed); roles live in sys.database_principals (type 'R').
+					stLog.execute(
+						"IF NOT EXISTS ( SELECT * FROM sys.database_principals WHERE name = N'" + roleName + "' AND type = 'R' )\n" +
+						"EXEC('CREATE ROLE ["+roleName+"]');"
+					);
+				} finally {
+					// Close even when execute throws (the original leaked the statement on failure).
+					stLog.close();
+				}
+			}
+		} catch (SQLException e) {
+			// NOTE(review): reuses the "createSchema" message key — confirm whether a
+			// dedicated "createRole" key exists in the lang resources.
+			throw new ExceptionDBGit(lang.getValue("errors", "adapter", "createSchema") + ": " + e.getLocalizedMessage());
+		}
+
+	}
+
+ @Override
+ public String getDefaultScheme() throws ExceptionDBGit {
+ try{
+ Statement stmt = getConnection().createStatement();
+
+ String query = "SELECT SCHEMA_NAME()";
+
+ ResultSet resultSet = stmt.executeQuery(query);
+ resultSet.next();
+ String result = resultSet.getString(1);
+
+ resultSet.close();
+ stmt.close();
+ return result;
+ }
+ catch (SQLException e){
+ throw new ExceptionDBGit(lang.getValue("errors", "adapter", "createSchema") + ": " + e.getLocalizedMessage());
+ }
+ }
+
+	/**
+	 * Returns true when any whitespace-separated token of {@code word} is a T-SQL
+	 * reserved word (case-insensitive).
+	 *
+	 * NOTE(review): the set is rebuilt on every call; it should live in a static final
+	 * field — moving it requires touching the class body, flagged for a follow-up.
+	 * NOTE(review): the list contains Cyrillic and lowercase entries ("В начало",
+	 * "ПОЛЬЗОВАТЕЛЬ", "на", "Кому", "None", "Value", "Параметр", "DD") that look like
+	 * copy-paste artifacts from a localized reference page — confirm and prune; they are
+	 * kept here to preserve behavior.
+	 *
+	 * @param word one identifier or a whitespace-separated phrase
+	 * @return true if any token matches a reserved word
+	 */
+	@Override
+	@SuppressWarnings("SpellCheckingInspection")
+	public boolean isReservedWord(String word) {
+
+		Set reservedWords = new HashSet<>(Arrays.asList(
+			"DD", "EXTERNAL", "PROCEDURE", "ALL", "FETCH", "PUBLIC", "ALTER", "FILE", "RAISERROR",
+			"AND", "FILLFACTOR", "READ", "ANY", "FOR", "READTEXT", "AS", "FOREIGN", "RECONFIGURE",
+			"ASC", "FREETEXT", "REFERENCES", "AUTHORIZATION", "FREETEXTTABLE", "REPLICATION",
+			"BACKUP", "FROM", "RESTORE", "BEGIN", "FULL", "RESTRICT", "BETWEEN", "FUNCTION", "RETURN",
+			"BREAK", "GOTO", "REVERT", "BROWSE", "GRANT", "REVOKE", "BULK", "GROUP", "RIGHT", "BY",
+			"HAVING", "ROLLBACK", "CASCADE", "HOLDLOCK", "ROWCOUNT", "CASE", "IDENTITY", "ROWGUIDCOL",
+			"CHECK", "IDENTITY_INSERT", "RULE", "CHECKPOINT", "IDENTITYCOL", "SAVE", "CLOSE", "IF",
+			"SCHEMA", "CLUSTERED", "IN", "SECURITYAUDIT", "COALESCE", "INDEX", "SELECT", "COLLATE",
+			"INNER", "SEMANTICKEYPHRASETABLE", "COLUMN", "INSERT", "SEMANTICSIMILARITYDETAILSTABLE",
+			"COMMIT", "INTERSECT", "SEMANTICSIMILARITYTABLE", "COMPUTE", "INTO", "SESSION_USER",
+			"CONSTRAINT", "IS", "SET", "CONTAINS", "JOIN", "SETUSER", "CONTAINSTABLE", "KEY", "SHUTDOWN",
+			"CONTINUE", "KILL", "SOME", "CONVERT", "LEFT", "STATISTICS", "CREATE", "LIKE", "SYSTEM_USER",
+			"CROSS", "LINENO", "TABLE", "CURRENT", "LOAD", "TABLESAMPLE", "CURRENT_DATE", "MERGE",
+			"TEXTSIZE", "CURRENT_TIME", "NATIONAL", "THEN", "CURRENT_TIMESTAMP", "NOCHECK", "TO",
+			"CURRENT_USER", "NONCLUSTERED", "В начало", "CURSOR", "NOT", "TRAN", "DATABASE", "NULL",
+			"TRANSACTION", "DBCC", "NULLIF", "TRIGGER", "DEALLOCATE", "OF", "TRUNCATE", "DECLARE",
+			"OFF", "TRY_CONVERT", "DEFAULT", "OFFSETS", "TSEQUAL", "DELETE", "ON", "UNION", "DENY",
+			"OPEN", "UNIQUE", "DESC", "OPENDATASOURCE", "UNPIVOT", "DISK", "OPENQUERY", "UPDATE",
+			"DISTINCT", "OPENROWSET", "UPDATETEXT", "DISTRIBUTED", "OPENXML", "USE", "DOUBLE",
+			"OPTION", "ПОЛЬЗОВАТЕЛЬ", "DROP", "OR", "VALUES", "DUMP", "OVER", "WAITFOR", "ERRLVL",
+			"PERCENT", "PIVOT", "PLAN", "WHILE", "на", "PRINT", "WRITETEXT", "EXIT", "PROC", "OVERLAPS",
+			"ADA", "ADD", "EXTERNAL", "PASCAL", "ALL", "EXTRACT", "POSITION", "PRECISION", "ALTER",
+			"FETCH", "AND", "ANY", "PRIMARY", "FOR", "СЛУЖБЫ", "ANALYSIS SERVICES", "FOREIGN",
+			"ASC", "FORTRAN", "PROCEDURE", "PUBLIC", "FROM", "READ", "AUTHORIZATION", "ПОЛНОЕ",
+			"REAL", "AVG", "REFERENCES", "BEGIN", "BETWEEN", "RESTRICT", "GOTO", "REVOKE", "BIT_LENGTH",
+			"GRANT", "RIGHT", "GROUP", "ROLLBACK", "BY", "HAVING", "CASCADE", "SCHEMA", "IDENTITY",
+			"CASE", "IN", "INCLUDE", "SELECT", "INDEX", "CHAR_LENGTH", "SESSION_USER", "SET",
+			"CHARACTER_LENGTH", "INNER", "CHECK", "CLOSE", "INSENSITIVE", "SOME", "COALESCE", "INSERT",
+			"COLLATE", "SQLCA", "COLUMN", "INTERSECT", "SQLCODE", "COMMIT", "SQLERROR", "INTO", "IS",
+			"CONSTRAINT", "SUBSTRING", "JOIN", "SUM", "CONTINUE", "KEY", "SYSTEM_USER", "CONVERT",
+			"TABLE", "COUNT", "THEN", "CREATE", "LEFT", "CROSS", "TIMESTAMP", "CURRENT", "LIKE",
+			"CURRENT_DATE", "CURRENT_TIME", "LOWER", "Кому", "CURRENT_TIMESTAMP", "CURRENT_USER",
+			"MAX", "TRANSACTION", "CURSOR", "MIN", "TRANSLATE", "TRIM", "DEALLOCATE", "UNION", "NATIONAL",
+			"UNIQUE", "DECLARE", "DEFAULT", "UPDATE", "UPPER", "DELETE", "NONE", "USER", "DESC", "NOT",
+			"NULL", "VALUE", "NULLIF", "VALUES", "OCTET_LENGTH", "VARYING", "DISTINCT", "OF", "VIEW",
+			"ON", "WHEN", "DOUBLE", "DROP", "OPEN", "WHERE", "ELSE", "OPTION", "WITH", "END", "OR", "ORDER",
+			"ESCAPE", "OUTER", "EXCEPT", "ABSOLUTE", "HOST", "RELATIVE", "ACTION", "HOUR", "RELEASE",
+			"ADMIN", "IGNORE", "RESULT", "AFTER", "IMMEDIATE", "RETURNS", "AGGREGATE", "INDICATOR",
+			"ROLE", "ALIAS", "INITIALIZE", "ROLLUP", "ALLOCATE", "INITIALLY", "ROUTINE", "ARE", "INOUT",
+			"ROW", "ARRAY", "INPUT", "ROWS", "ASENSITIVE", "INT", "SAVEPOINT", "ASSERTION", "INTEGER",
+			"SCROLL", "ASYMMETRIC", "INTERSECTION", "SCOPE", "AT", "INTERVAL", "SEARCH", "ATOMIC",
+			"ISOLATION", "SECOND", "BEFORE", "ITERATE", "SECTION", "BINARY", "LANGUAGE", "SENSITIVE",
+			"BIT", "LARGE", "SEQUENCE", "BLOB", "LAST", "SESSION", "BOOLEAN", "LATERAL", "SETS", "BOTH",
+			"LEADING", "SIMILAR", "BREADTH", "LESS", "SIZE", "CALL", "LEVEL", "SMALLINT", "CALLED",
+			"LIKE_REGEX", "SPACE", "CARDINALITY", "LIMIT", "SPECIFIC", "CASCADED", "LN", "SPECIFICTYPE",
+			"CAST", "LOCAL", "SQL", "CATALOG", "LOCALTIME", "SQLEXCEPTION", "CHAR", "LOCALTIMESTAMP",
+			"SQLSTATE", "CHARACTER", "LOCATOR", "SQLWARNING", "CLASS", "MAP", "START", "CLOB", "MATCH",
+			"STATE", "COLLATION", "MEMBER", "STATEMENT", "COLLECT", "METHOD", "STATIC", "COMPLETION",
+			"MINUTE", "STDDEV_POP", "CONDITION", "MOD", "STDDEV_SAMP", "CONNECT", "MODIFIES", "STRUCTURE",
+			"CONNECTION", "MODIFY", "SUBMULTISET", "CONSTRAINTS", "MODULE", "SUBSTRING_REGEX",
+			"CONSTRUCTOR", "MONTH", "SYMMETRIC", "CORR", "MULTISET", "SYSTEM", "CORRESPONDING",
+			"NAMES", "TEMPORARY", "COVAR_POP", "NATURAL", "TERMINATE", "COVAR_SAMP", "NCHAR", "THAN",
+			"CUBE", "NCLOB", "TIME", "CUME_DIST", "NEW", "TIMESTAMP", "CURRENT_CATALOG", "NEXT",
+			"TIMEZONE_HOUR", "CURRENT_DEFAULT_TRANSFORM_GROUP", "NO", "TIMEZONE_MINUTE",
+			"CURRENT_PATH", "None", "TRAILING", "CURRENT_ROLE", "NORMALIZE", "TRANSLATE_REGEX",
+			"CURRENT_SCHEMA", "NUMERIC", "TRANSLATION", "CURRENT_TRANSFORM_GROUP_FOR_TYPE",
+			"OBJECT", "TREAT", "CYCLE", "OCCURRENCES_REGEX", "TRUE", "DATA", "OLD", "UESCAPE", "DATE",
+			"ONLY", "UNDER", "DAY", "OPERATION", "UNKNOWN", "DEC", "ORDINALITY", "UNNEST", "DECIMAL",
+			"OUT", "USAGE", "DEFERRABLE", "OVERLAY", "USING", "DEFERRED", "OUTPUT", "Value", "DEPTH",
+			"PAD", "VAR_POP", "DEREF", "Параметр", "VAR_SAMP", "DESCRIBE", "PARAMETERS", "VARCHAR",
+			"DESCRIPTOR", "PARTIAL", "VARIABLE", "DESTROY", "PARTITION", "WHENEVER", "DESTRUCTOR",
+			"PATH", "WIDTH_BUCKET", "DETERMINISTIC", "POSTFIX", "WITHOUT", "DICTIONARY", "PREFIX",
+			"WINDOW", "DIAGNOSTICS", "PREORDER", "WITHIN", "DISCONNECT", "PREPARE", "WORK", "DOMAIN",
+			"PERCENT_RANK", "WRITE", "DYNAMIC", "PERCENTILE_CONT", "XMLAGG", "EACH", "PERCENTILE_DISC",
+			"XMLATTRIBUTES", "ELEMENT", "POSITION_REGEX", "XMLBINARY", "END-EXEC", "PRESERVE",
+			"XMLCAST", "EQUALS", "PRIOR", "XMLCOMMENT", "EVERY", "PRIVILEGES", "XMLCONCAT",
+			"EXCEPTION", "RANGE", "XMLDOCUMENT", "FALSE", "READS", "XMLELEMENT", "FILTER", "REAL",
+			"XMLEXISTS", "FIRST", "RECURSIVE", "XMLFOREST", "FLOAT", "REF", "XMLITERATE", "FOUND",
+			"REFERENCING", "XMLNAMESPACES", "FREE", "REGR_AVGX", "XMLPARSE", "FULLTEXTTABLE",
+			"REGR_AVGY", "XMLPI", "FUSION", "REGR_COUNT", "XMLQUERY", "GENERAL", "REGR_INTERCEPT",
+			"XMLSERIALIZE", "GET", "REGR_R2", "XMLTABLE", "GLOBAL", "REGR_SLOPE", "XMLTEXT", "GO",
+			"REGR_SXX", "XMLVALIDATE", "GROUPING", "REGR_SXY", "YEAR", "HOLD", "REGR_SYY", "ZONE"
+		));
+
+
+		// Check every whitespace-separated token; a single reserved token flags the whole word.
+		String[] words = word.split("\\s");
+		for (String str : words) {
+			if(reservedWords.contains(str.toUpperCase())) return true;
+		}
+		return false;
+	}
+
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBBackupAdapterMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBBackupAdapterMssql.java
new file mode 100644
index 0000000..27e5ee0
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBBackupAdapterMssql.java
@@ -0,0 +1,297 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import ru.fusionsoft.dbgit.adapters.DBBackupAdapter;
+import ru.fusionsoft.dbgit.core.DBGitPath;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRestore;
+import ru.fusionsoft.dbgit.core.SchemaSynonym;
+import ru.fusionsoft.dbgit.dbobjects.DBConstraint;
+import ru.fusionsoft.dbgit.dbobjects.DBIndex;
+import ru.fusionsoft.dbgit.dbobjects.DBTableField;
+import ru.fusionsoft.dbgit.meta.*;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+import ru.fusionsoft.dbgit.utils.StringProperties;
+
+import java.io.File;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.MessageFormat;
+import java.util.Objects;
+
+@SuppressWarnings("Duplicates")
+public class DBBackupAdapterMssql extends DBBackupAdapter {
+
+    /**
+     * Creates a backup copy of the given object inside the database and returns the
+     * meta object (reloaded from its dbgit file when one exists on disk).
+     *
+     * Supported meta types: MetaSql (view/function/procedure DDL), MetaTable
+     * (structure, optionally data, plus constraints and indexes) and MetaSequence.
+     * The copy lives either in a PREFIX-ed schema (isSaveToSchema()) or under a
+     * PREFIX-ed object name inside the source schema.
+     */
+    @Override
+    public IMetaObject backupDBObject(IMetaObject obj) throws SQLException, ExceptionDBGit {
+
+        Connection connection = adapter.getConnection();
+        StatementLogging stLog = new StatementLogging(connection, adapter.getStreamOutputSqlCommand(), adapter.isExecSql());
+
+        try {
+            if (obj instanceof MetaSql) {
+                MetaSql metaSql = (MetaSql) obj;
+                String objectName = metaSql.getSqlObject().getName();
+                metaSql.loadFromDB();
+
+                String ddl = metaSql.getSqlObject().getSql();
+                String schema = metaSql.getSqlObject().getSchema();
+
+                if (isSaveToSchema()) {
+                    createSchema(stLog, schema);
+                }
+
+                ConsoleWriter.detailsPrintln(lang.getValue("general", "backup", "tryingToCopy").withParams(objectName, getFullDbName(schema, objectName)), messageLevel);
+
+                // Point the source DDL at the backup name before executing it
+                ddl = ddl.replace(schema + "." + objectName, getFullDbName(schema, objectName));
+
+                stLog.execute(ddl);
+
+                File file = new File(DBGitPath.getFullPath() + metaSql.getFileName());
+                if (file.exists())
+                    obj = metaSql.loadFromFile();
+
+                ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+            }
+            else if (obj instanceof MetaTable) {
+
+                MetaTable metaTable = (MetaTable) obj;
+                metaTable.loadFromDB();
+                String objectName = metaTable.getTable().getName();
+                String schema = metaTable.getTable().getSchema();
+                // Honor schema synonyms when present
+                schema = (SchemaSynonym.getInstance().getSchema(schema) == null) ? schema : SchemaSynonym.getInstance().getSchema(schema);
+                String tableSam = getFullDbName(schema, objectName);
+                String origTableName = schema + "." + objectName;
+
+                // Source table is gone from the DB: fall back to the on-disk meta file
+                if (!isExists(schema, objectName)) {
+                    File file = new File(DBGitPath.getFullPath() + metaTable.getFileName());
+                    if (file.exists())
+                        obj = metaTable.loadFromFile();
+                    return obj;
+                }
+
+                if (isSaveToSchema()) {
+                    createSchema(stLog, schema);
+                }
+
+                ConsoleWriter.detailsPrintln(lang.getValue("general", "backup", "tryingToCopy").withParams(objectName, getFullDbName(schema, objectName)), messageLevel);
+
+                dropIfExists(isSaveToSchema() ? PREFIX + schema : schema,
+                        isSaveToSchema() ? objectName : PREFIX + objectName, stLog);
+
+                String ddl = "";
+                if (isToSaveData()) {
+                    // Fields + data
+                    ddl = "create table " + tableSam + " as (select * from " + schema + "." + objectName + ")" + ";\n";
+
+                    // Schema
+                    ddl += "alter schema " + adapter.getDefaultScheme() + " transfer " + tableSam + ";\n";
+                } else {
+                    // Fields only
+                    ddl = "create table " + tableSam + "(";
+                    for (DBTableField field : metaTable.getFields().values()) {
+                        ddl += MessageFormat.format("\n[{0}] {1},", field.getName(), field.getTypeSQL());
+                    }
+                    ddl = ddl.substring(0, ddl.length() - 1);  // drop trailing comma
+                    ddl += "\n);\n";
+
+                    // Schema
+                    ddl += "alter schema " + adapter.getDefaultScheme() + " transfer " + tableSam + ";\n";
+                }
+
+                // Re-create constraints against the backup table under PREFIX-ed names
+                for (DBConstraint constraint : metaTable.getConstraints().values()) {
+                    String constraintSql = constraint.getSql().replace(
+                            "ALTER TABLE " + origTableName + " ADD CONSTRAINT " + constraint.getName(),
+                            "alter table " + tableSam + " add constraint " + PREFIX + constraint.getName() + " "
+                    );
+                    ddl += constraintSql + "\n";
+                }
+
+                // Re-create indexes against the backup table under PREFIX-ed names
+                for (DBIndex index : metaTable.getIndexes().values()) {
+                    String indexDdl = index.getSql() + "\n";
+                    indexDdl = indexDdl
+                            .replace(index.getName(), PREFIX + index.getName())
+                            .replace("[" + objectName + "]", "[" + PREFIX + objectName + "]");
+                    if (indexDdl.length() > 3)
+                        ddl += indexDdl;
+                }
+                stLog.execute(ddl);
+
+                File file = new File(DBGitPath.getFullPath() + metaTable.getFileName());
+                if (file.exists())
+                    obj = metaTable.loadFromFile();
+                ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+            }
+            else if (obj instanceof MetaSequence) {
+                MetaSequence metaSequence = (MetaSequence) obj;
+                metaSequence.loadFromDB();
+
+                String objectName = metaSequence.getSequence().getName();
+                String schema = metaSequence.getSequence().getSchema();
+
+                if (isSaveToSchema()) {
+                    createSchema(stLog, schema);
+                }
+
+                String sequenceName = getFullDbName(schema, objectName);
+
+                ConsoleWriter.detailsPrintln(lang.getValue("general", "backup", "tryingToCopy").withParams(objectName, getFullDbName(schema, objectName)), messageLevel);
+
+                StringProperties props = metaSequence.getSequence().getOptions();
+                String seqName = props.get("name").getData();
+                String seqTypeName = props.get("typename").getData();
+                String seqStart = props.get("start_value").getData();
+                String seqIncr = props.get("increment").getData();
+                StringProperties seqMin = props.get("minimum_value");
+                StringProperties seqMax = props.get("maximum_value");
+                boolean seqCycle = props.get("is_cycling").getData().equals("1");
+                boolean seqHasCache = props.get("is_cached").getData().equals("1");
+                // there may be a default cache size, so this entry can be absent
+                StringProperties seqCacheSize = props.get("cache_size");
+                String seqOwner = props.get("owner").getData();
+
+                Objects.requireNonNull(seqTypeName);
+
+                String ddl = "CREATE SEQUENCE " + sequenceName + " AS " + seqTypeName
+                        + " START WITH " + seqStart
+                        + " INCREMENT BY " + seqIncr
+                        + (Objects.nonNull(seqMin) ? " MINVALUE " + seqMin.getData() : " NO MINVALUE ")
+                        + (Objects.nonNull(seqMax) ? " MAXVALUE " + seqMax.getData() : " NO MAXVALUE ")
+                        // BUG FIX: previously concatenated the StringProperties object
+                        // itself (its toString()), not its data value
+                        + (seqHasCache
+                            ? " CACHE " + (seqCacheSize != null ? seqCacheSize.getData() : " ")
+                            : " NO CACHE")
+                        // BUG FIX: the "\n" used to bind only to the NO CYCLE branch,
+                        // fusing CREATE SEQUENCE and ALTER SCHEMA into one line for CYCLE
+                        + (seqCycle ? " CYCLE " : " NO CYCLE ")
+                        + "\n";
+
+                ddl += MessageFormat.format(
+                        "ALTER SCHEMA {0} TRANSFER {1}.{2}",
+                        adapter.getConnection().getSchema(), seqOwner, seqName
+                );
+
+                dropIfExists(isSaveToSchema() ? PREFIX + schema : schema,
+                        isSaveToSchema() ? objectName : PREFIX + objectName, stLog);
+
+                stLog.execute(ddl);
+
+                File file = new File(DBGitPath.getFullPath() + metaSequence.getFileName());
+                if (file.exists())
+                    obj = metaSequence.loadFromFile();
+                ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+            }
+
+        } catch (SQLException e1) {
+            throw new ExceptionDBGitRestore(
+                    lang.getValue("errors", "restore", "objectRestoreError")
+                            .withParams(obj.getName() + ": " + e1.getLocalizedMessage())
+                    , e1
+            );
+        } catch (Exception e) {
+            throw new ExceptionDBGit(lang.getValue("errors", "backup", "backupError").withParams(obj.getName()), e);
+        } finally {
+            stLog.close();
+            connection.commit();
+        }
+        return obj;
+    }
+
+    /**
+     * Restores a previously backed-up object. Not implemented for MSSQL yet:
+     * this override is a no-op.
+     */
+    @Override
+    public void restoreDBObject(IMetaObject obj) throws Exception {
+        // TODO Auto-generated method stub
+
+    }
+
+    /**
+     * Builds the fully qualified name of the backup copy: either
+     * {@code PREFIX+schema.objectName} (schema-level backup) or
+     * {@code schema.PREFIX+objectName} (object-level backup).
+     */
+    private String getFullDbName(String schema, String objectName) {
+        return isSaveToSchema()
+                ? PREFIX + schema + "." + objectName
+                : schema + "." + PREFIX + objectName;
+    }
+
+ public void dropIfExists(String owner, String objectName, StatementLogging stLog) throws SQLException {
+ String query =
+ "SELECT CASE \n" +
+ "WHEN type IN ('PC', 'P') THEN 'PROCEDURE'\n" +
+ "WHEN type IN ('FN', 'FS', 'FT', 'IF', 'TF') THEN 'FUNCTION' \n" +
+ "WHEN type = 'AF' THEN 'AGGREGATE' \n" +
+ "WHEN type = 'U' THEN 'TABLE' \n" +
+ "WHEN type = 'V' THEN 'VIEW' \n" +
+ "WHEN type IN ('SQ', 'SO') THEN 'SEQUENCE' \n" +
+ "END type\n" +
+ "FROM sys.objects so\n" +
+ "WHERE lower(name) = lower('"+objectName+"') \n" +
+ "AND lower(SCHEMA_NAME(schema_id)) = lower('"+owner+"')";
+
+ try(Statement st = adapter.getConnection().createStatement(); ResultSet rs = st.executeQuery(query)) {
+ while (rs.next()) {
+ String type = rs.getString("type");
+ stLog.execute(MessageFormat.format("DROP {0} {1}.{2}", type, owner, objectName));
+ }
+ }
+
+ }
+
+    /**
+     * Drops the database object described by the meta object, if a sys.objects
+     * entry of the matching type exists for its schema-qualified name.
+     */
+    @Override
+    public void dropIfExists(IMetaObject imo, StatementLogging stLog) throws SQLException {
+        NameMeta nm = new NameMeta(imo);
+        String typeString = "'none'";
+        switch ((DBGitMetaType) nm.getType()) {
+            case DBGitTable:    typeString = "'TABLE'"; break;
+            case DbGitFunction: typeString = "'FUNCTION'"; break;
+            case DbGitProcedure:typeString = "'PROCEDURE'"; break;
+            case DbGitView:     typeString = "'VIEW'"; break;
+            case DBGitSequence: typeString = "'SEQUENCE'"; break;
+        }
+
+        // BUG FIX: this query was built with MessageFormat, where a single quote
+        // starts a quoted section — lower('{0}') and lower('{1}') therefore were
+        // emitted literally and never substituted, producing invalid SQL.
+        // Plain concatenation avoids MessageFormat's quote-escaping rules.
+        String query =
+                "SELECT CASE \n" +
+                "WHEN type IN ('PC', 'P') THEN 'PROCEDURE'\n" +
+                "WHEN type IN ('FN', 'FS', 'FT', 'IF', 'TF', 'AF') THEN 'FUNCTION' \n" +
+                "WHEN type = 'U' THEN 'TABLE' \n" +
+                "WHEN type = 'V' THEN 'VIEW' \n" +
+                "WHEN type IN ('SQ', 'SO') THEN 'SEQUENCE' \n" +
+                "END type\n" +
+                "FROM sys.objects so\n" +
+                "WHERE lower(SCHEMA_NAME(schema_id)) = lower('" + nm.getSchema() + "') \n" +
+                "AND lower(name) = lower('" + nm.getName() + "') \n" +
+                "AND type IN (" + typeString + ")";
+
+        try (Statement st = adapter.getConnection().createStatement(); ResultSet rs = st.executeQuery(query)) {
+            while (rs.next()) {
+                String type = rs.getString("type");
+                stLog.execute(MessageFormat.format("DROP {0} {1}.{2}", type, nm.getSchema(), nm.getName()));
+            }
+        }
+    }
+
+ @Override
+ public boolean isExists(String owner, String objectName) throws SQLException {
+ Statement st = adapter.getConnection().createStatement();
+ ResultSet rs = st.executeQuery(
+ "SELECT CASE WHEN OBJECT_ID('"+owner+"."+objectName+"') IS NOT NULL THEN 1 ELSE 0 END"
+ );
+
+ rs.next();
+ return rs.getInt(1) == 1;
+ }
+
+    /**
+     * Creates the backup schema {@code PREFIX+schema} when needed. Returns false
+     * (after logging the error) instead of propagating the SQLException.
+     */
+    @Override
+    public boolean createSchema(StatementLogging stLog, String schema) {
+        try {
+            // NOTE(review): existence is checked for "schema" but the schema being
+            // created is PREFIX+schema — this looks like it should check
+            // containsKey(PREFIX + schema); confirm the key format of getSchemes().
+            if (!adapter.getSchemes().containsKey(schema)) {
+                ConsoleWriter.detailsPrintln(lang.getValue("general", "backup", "creatingSchema").withParams(PREFIX + schema), messageLevel);
+                stLog.execute(MessageFormat.format("CREATE SCHEMA {0}{1}", PREFIX, schema));
+            }
+            return true;
+        } catch (SQLException e) {
+            // Deliberate best-effort: failure is reported to the console, not thrown
+            ConsoleWriter.println(lang.getValue("errors", "backup", "cannotCreateSchema").withParams(e.getLocalizedMessage()), messageLevel);
+            return false;
+        }
+    }
+
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreFunctionMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreFunctionMssql.java
new file mode 100644
index 0000000..3bde897
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreFunctionMssql.java
@@ -0,0 +1,75 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import ru.fusionsoft.dbgit.adapters.DBRestoreAdapter;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRestore;
+import ru.fusionsoft.dbgit.dbobjects.DBFunction;
+import ru.fusionsoft.dbgit.dbobjects.DBSQLObject;
+import ru.fusionsoft.dbgit.meta.IMetaObject;
+import ru.fusionsoft.dbgit.meta.MetaFunction;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+
+import java.sql.Connection;
+import java.text.MessageFormat;
+import java.util.Map;
+
+public class DBRestoreFunctionMssql extends DBRestoreAdapter {
+
+    /**
+     * Restores a function: creates it when absent, drops and re-creates it when
+     * the stored DDL differs from the live one, and transfers it to the stored
+     * owner's schema when only the owner differs.
+     */
+    @Override
+    public boolean restoreMetaObject(IMetaObject obj, int step) throws Exception {
+        IDBAdapter adapter = getAdapter();
+        Connection connect = adapter.getConnection();
+        StatementLogging st = new StatementLogging(connect, adapter.getStreamOutputSqlCommand(), adapter.isExecSql());
+        try {
+            if (obj instanceof MetaFunction) {
+                MetaFunction restoreFunction = (MetaFunction)obj;
+                DBSQLObject restoringDBF = restoreFunction.getSqlObject();
+                String functionName = restoreFunction.getSqlObject().getName();
+                // BUG FIX: the raw Map made functions.get() return Object, which
+                // does not compile when assigned to DBFunction below.
+                Map<String, DBFunction> functions = adapter.getFunctions(restoreFunction.getSqlObject().getSchema());
+
+                if (functions.containsKey(functionName)) {
+                    DBFunction existingDBF = functions.get(functionName);
+                    boolean ddlsDiffer = !restoringDBF.getSql().equals(existingDBF.getSql());
+                    boolean ownersDiffer = !restoringDBF.getOwner().equals(existingDBF.getOwner());
+
+                    if (ddlsDiffer) {
+                        st.execute(MessageFormat.format("DROP FUNCTION {0}.{1}", existingDBF.getOwner(), existingDBF.getName()));
+                        st.execute(restoreFunction.getSqlObject().getSql());
+                    }
+                    if (ownersDiffer) {
+                        //TODO remove sp_changeowner usage in other methods
+                        String ddl = MessageFormat.format(
+                                "ALTER SCHEMA {0} TRANSFER {1}.{2}",
+                                restoringDBF.getOwner(), existingDBF.getOwner(), functionName
+                        );
+                        st.execute(ddl);
+                    }
+                } else {
+                    st.execute(restoreFunction.getSqlObject().getSql());
+                }
+                //TODO restore privileges
+                // BUG FIX: "ok" used to be printed in finally, i.e. even after a failure
+                ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+            }
+            else
+            {
+                throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "metaTypeError").withParams(
+                        obj.getName()
+                        , "function", obj.getType().getValue()
+                ));
+            }
+        } catch (Exception e) {
+            throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "objectRestoreError").withParams(obj.getName()), e);
+        } finally {
+            st.close();
+        }
+        return true;
+    }
+
+    /** Removes the function from the database. Not implemented for MSSQL yet: no-op. */
+    @Override
+    public void removeMetaObject(IMetaObject obj) throws Exception {
+        // TODO Auto-generated method stub
+
+    }
+
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreProcedureMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreProcedureMssql.java
new file mode 100644
index 0000000..5e50e36
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreProcedureMssql.java
@@ -0,0 +1,69 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import ru.fusionsoft.dbgit.adapters.DBRestoreAdapter;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRestore;
+import ru.fusionsoft.dbgit.dbobjects.DBProcedure;
+import ru.fusionsoft.dbgit.dbobjects.DBSQLObject;
+import ru.fusionsoft.dbgit.meta.IMetaObject;
+import ru.fusionsoft.dbgit.meta.MetaProcedure;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+
+import java.sql.Connection;
+import java.text.MessageFormat;
+import java.util.Map;
+
+public class DBRestoreProcedureMssql extends DBRestoreAdapter {
+
+ @Override
+ public boolean restoreMetaObject(IMetaObject obj, int step) throws Exception {
+ IDBAdapter adapter = getAdapter();
+ Connection connect = adapter.getConnection();
+ StatementLogging st = new StatementLogging(connect, adapter.getStreamOutputSqlCommand(), adapter.isExecSql());
+ try {
+ if (obj instanceof MetaProcedure) {
+ MetaProcedure restoreProcedure = (MetaProcedure)obj;
+ DBSQLObject restoringProc = restoreProcedure.getSqlObject();
+ String procedureName = restoringProc.getName();
+ String procedureSchema = restoringProc.getSchema();
+
+ if(adapter.getProcedures(procedureSchema).containsKey(procedureName)) {
+ DBProcedure existingProc = adapter.getProcedure(procedureSchema, procedureName);
+
+ if(!restoringProc.getSql().equals(existingProc.getSql())) {
+ st.execute(MessageFormat.format("DROP PROCEDURE {0}.{1}", existingProc.getOwner(), existingProc.getName()));
+ st.execute(restoreProcedure.getSqlObject().getSql(), "/");
+ //TODO Восстановление привилегий
+ }
+ }
+ else{
+ st.execute(restoreProcedure.getSqlObject().getSql(), "/");
+ //TODO Восстановление привилегий
+ }
+ ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+ }
+ else
+ {
+ throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "metaTypeError").withParams(
+ obj.getName()
+ , "procedure", obj.getType().getValue()
+ ));
+ }
+
+ }
+ catch (Exception e) {
+ throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "objectRestoreError").withParams(obj.getName()), e);
+ } finally {
+ st.close();
+ }
+
+ return true;
+ }
+
+    /** Removes the procedure from the database. Not implemented for MSSQL yet: no-op. */
+    @Override
+    public void removeMetaObject(IMetaObject obj) throws Exception {
+        // TODO Auto-generated method stub
+    }
+
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreRoleMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreRoleMssql.java
new file mode 100644
index 0000000..e8be46f
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreRoleMssql.java
@@ -0,0 +1,82 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import ru.fusionsoft.dbgit.adapters.DBRestoreAdapter;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRestore;
+import ru.fusionsoft.dbgit.dbobjects.DBRole;
+import ru.fusionsoft.dbgit.meta.IMetaObject;
+import ru.fusionsoft.dbgit.meta.MetaRole;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+import ru.fusionsoft.dbgit.utils.StringProperties;
+
+import java.sql.Connection;
+import java.text.MessageFormat;
+import java.util.Map;
+
+public class DBRestoreRoleMssql extends DBRestoreAdapter{
+
+    /**
+     * Restores a role: creates it when absent, re-applies the stored DDL when it
+     * differs from the live one (falling back to a plain CREATE ROLE when no DDL
+     * was captured in the meta options).
+     */
+    @Override
+    public boolean restoreMetaObject(IMetaObject obj, int step) throws Exception {
+        IDBAdapter adapter = getAdapter();
+        Connection connect = adapter.getConnection();
+        StatementLogging st = new StatementLogging(connect, adapter.getStreamOutputSqlCommand(), adapter.isExecSql());
+        try {
+            if (obj instanceof MetaRole) {
+                MetaRole restoreRole = (MetaRole)obj;
+                // BUG FIX: raw Map made the for-each over roles.values() fail to compile
+                Map<String, DBRole> roles = adapter.getRoles();
+                StringProperties opts = restoreRole.getObjectOption().getOptions();
+                String restoreDdl = opts.get("ddl") != null ? opts.get("ddl").getData() : "";
+                String restoreRoleName = restoreRole.getObjectOption().getName();
+                String simpleCreateRoleDdl = MessageFormat.format("CREATE ROLE [{0}];", restoreRoleName);
+
+                boolean exist = false;
+
+                // BUG FIX: was !(roles.isEmpty() || roles == null) — isEmpty() was
+                // evaluated before the null check, so a null map threw NPE.
+                if (roles != null && !roles.isEmpty()) {
+                    for (DBRole role : roles.values()) {
+
+                        if (restoreRole.getObjectOption().getName().equals(role.getName())) {
+                            exist = true;
+
+                            String existingDdl = role.getOptions().get("ddl") != null ? role.getOptions().get("ddl").getData() : "";
+                            // Compare DDLs ignoring all whitespace differences
+                            boolean isEqualDdls = restoreDdl
+                                    .replaceAll("\\s+", "")
+                                    .equals(existingDdl.replaceAll("\\s+", ""));
+
+                            if (!isEqualDdls) {
+                                if (!restoreDdl.isEmpty()) st.execute(restoreDdl);
+                                else st.execute(simpleCreateRoleDdl);
+                                //TODO restore privileges instead of simpleCreateRoleDdl
+                            }
+                        }
+                    }
+                }
+
+                if (!exist) {
+                    if (!restoreDdl.isEmpty()) st.execute(restoreDdl);
+                    else st.execute(simpleCreateRoleDdl);
+                }
+                connect.commit();
+                // BUG FIX: "ok" used to be printed in finally, i.e. even right after
+                // the failure message in the catch block.
+                ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+            }
+            else {
+                throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "metaTypeError").withParams(
+                        obj.getName()
+                        , "role", obj.getType().getValue()
+                ));
+            }
+        } catch (Exception e) {
+            ConsoleWriter.detailsPrintlnRed(lang.getValue("errors", "meta", "fail"), 0);
+            throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "objectRestoreError").withParams(obj.getName()), e);
+        } finally {
+            st.close();
+        }
+        return true;
+    }
+    /** Removes the role from the database. Not implemented for MSSQL yet: no-op. */
+    @Override
+    public void removeMetaObject(IMetaObject obj) throws Exception {
+        // TODO Auto-generated method stub
+
+    }
+
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreSchemaMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreSchemaMssql.java
new file mode 100644
index 0000000..356e63e
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreSchemaMssql.java
@@ -0,0 +1,70 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import ru.fusionsoft.dbgit.adapters.DBRestoreAdapter;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.DBGitConfig;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRestore;
+import ru.fusionsoft.dbgit.dbobjects.DBSchema;
+import ru.fusionsoft.dbgit.meta.IMetaObject;
+import ru.fusionsoft.dbgit.meta.MetaSchema;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+
+import java.sql.Connection;
+import java.util.Map;
+
+
+public class DBRestoreSchemaMssql extends DBRestoreAdapter {
+    /**
+     * Restores a schema: creates it with its stored owner when absent; when it
+     * exists, re-assigns ownership if the stored "usename" differs (unless
+     * owner handling is disabled via config).
+     */
+    @Override
+    public boolean restoreMetaObject(IMetaObject obj, int step) throws Exception {
+        IDBAdapter adapter = getAdapter();
+        Connection connect = adapter.getConnection();
+        StatementLogging st = new StatementLogging(connect, adapter.getStreamOutputSqlCommand(), adapter.isExecSql());
+        try {
+            if (obj instanceof MetaSchema) {
+                MetaSchema restoreSchema = (MetaSchema)obj;
+                // BUG FIX: raw Map made the for-each over schs.values() fail to compile
+                Map<String, DBSchema> schs = adapter.getSchemes();
+                boolean exist = false;
+                // BUG FIX: was !(schs.isEmpty() || schs == null) — isEmpty() was
+                // evaluated before the null check, so a null map threw NPE.
+                if (schs != null && !schs.isEmpty()) {
+                    for (DBSchema sch : schs.values()) {
+                        if (restoreSchema.getObjectOption().getName().equals(sch.getName())) {
+                            exist = true;
+                            // TODO MSSQL restore Schema script
+
+                            if (!DBGitConfig.getInstance().getToIgnoreOnwer(false)) {
+                                // Re-assign ownership only when it actually differs
+                                if (!restoreSchema.getObjectOption().getOptions().getChildren().get("usename").getData().equals(sch.getOptions().getChildren().get("usename").getData())) {
+                                    st.execute("ALTER SCHEMA "+ restoreSchema.getObjectOption().getName() +" OWNER TO "+
+                                            restoreSchema.getObjectOption().getOptions().getChildren().get("usename").getData());
+                                }
+                            }
+                        }
+                    }
+                }
+                if (!exist) {
+
+                    st.execute("CREATE SCHEMA "+restoreSchema.getObjectOption().getName() +" AUTHORIZATION "+
+                            restoreSchema.getObjectOption().getOptions().getChildren().get("usename").getData());
+                    //TODO restore privileges
+                }
+                // BUG FIX: "ok" used to be printed in finally, i.e. even after a failure
+                ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+            }
+            else
+            {
+                throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "metaTypeError").withParams(
+                        obj.getName()
+                        , "schema", obj.getType().getValue()
+                ));
+            }
+        } catch (Exception e) {
+            throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "objectRestoreError").withParams(obj.getName()), e);
+        } finally {
+            st.close();
+        }
+        return true;
+    }
+
+    /** Removes the schema from the database. Not implemented for MSSQL yet: no-op. */
+    // CONSISTENCY FIX: sibling restore adapters annotate this override — add
+    // @Override so the compiler verifies the signature against the base class.
+    @Override
+    public void removeMetaObject(IMetaObject obj) throws Exception {
+        // TODO Auto-generated method stub
+    }
+
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreSequenceMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreSequenceMssql.java
new file mode 100644
index 0000000..5b48f14
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreSequenceMssql.java
@@ -0,0 +1,102 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import ru.fusionsoft.dbgit.adapters.DBRestoreAdapter;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRestore;
+import ru.fusionsoft.dbgit.dbobjects.DBSequence;
+import ru.fusionsoft.dbgit.meta.IMetaObject;
+import ru.fusionsoft.dbgit.meta.MetaSequence;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+import ru.fusionsoft.dbgit.utils.StringProperties;
+
+import java.sql.Connection;
+import java.text.MessageFormat;
+import java.util.Map;
+import java.util.Objects;
+
+public class DBRestoreSequenceMssql extends DBRestoreAdapter {
+
+    /**
+     * Restores a sequence: emits CREATE SEQUENCE when it does not exist in the
+     * target schema, ALTER SEQUENCE otherwise, rebuilding all options
+     * (type, start, increment, min/max, cache, cycle) from the stored meta
+     * properties, then transfers the sequence to the current schema.
+     */
+    @Override
+    public boolean restoreMetaObject(IMetaObject obj, int step) throws Exception {
+        IDBAdapter adapter = getAdapter();
+        Connection connect = adapter.getConnection();
+        StatementLogging st = new StatementLogging(connect, adapter.getStreamOutputSqlCommand(), adapter.isExecSql());
+        try {
+            if (obj instanceof MetaSequence) {
+
+                MetaSequence restoreSeq = (MetaSequence)obj;
+                String seqSchema = restoreSeq.getSequence().getSchema();
+                StringProperties props = restoreSeq.getSequence().getOptions();
+                String seqName = props.get("name").getData();
+                String sequenceSam = seqSchema+"."+seqName;
+
+                // Check existence by (case-insensitive) name within the schema
+                // BUG FIX: raw Map made the for-each over seqs.values() fail to compile
+                Map<String, DBSequence> seqs = adapter.getSequences(seqSchema);
+                boolean exist = false;
+                for (DBSequence seq : seqs.values()) {
+                    String currentName = seq.getOptions().getChildren().get("name").getData();
+                    if (currentName.equalsIgnoreCase(seqName)) {
+                        exist = true;
+                        break;
+                    }
+                }
+
+                String ddl = exist
+                        ? ("ALTER SEQUENCE " + sequenceSam)
+                        : ("CREATE SEQUENCE " + props.get("name").getData());
+
+                ddl += " AS " + props.get("typename").getData()
+                        + " START WITH " + props.get("start_value").getData()
+                        + " INCREMENT BY " + props.get("increment").getData()
+                        + (Objects.nonNull(props.get("minimum_value"))
+                            ? " MINVALUE " + props.get("minimum_value").getData()
+                            : " NO MINVALUE "
+                        )
+                        + (Objects.nonNull(props.get("maximum_value"))
+                            ? " MAXVALUE " + props.get("maximum_value").getData()
+                            : " NO MAXVALUE "
+                        )
+                        + ((props.get("is_cached").getData().equals("1"))
+                            ? " CACHE " + (props.get("cache_size") != null
+                                ? props.get("cache_size").getData() : " " )
+                            : " NO CACHE")
+                        // BUG FIX: the "\n" used to bind only to the NO CYCLE branch,
+                        // fusing the sequence DDL and the ALTER SCHEMA into one line
+                        // when CYCLE was set
+                        + ((props.get("is_cycling").getData().equals("1"))
+                            ? " CYCLE "
+                            : " NO CYCLE ")
+                        + "\n";
+
+                ddl += MessageFormat.format(
+                        "ALTER SCHEMA {0} TRANSFER {1}.{2}",
+                        adapter.getConnection().getSchema(),
+                        props.get("owner").getData(),
+                        props.get("name").getData()
+                );
+
+                //TODO restore privileges
+                st.execute(ddl);
+                // BUG FIX: "ok" used to be printed in finally, i.e. even after a failure
+                ConsoleWriter.detailsPrintGreen(lang.getValue("general", "ok"));
+            }
+            else
+            {
+                throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "metaTypeError").withParams(
+                        obj.getName()
+                        , "sequence", obj.getType().getValue()
+                ));
+            }
+        } catch (Exception e) {
+            throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "objectRestoreError").withParams(obj.getName()), e);
+        } finally {
+            st.close();
+        }
+        return true;
+    }
+
+    /** Removes the sequence from the database. Not implemented for MSSQL yet: no-op. */
+    @Override
+    public void removeMetaObject(IMetaObject obj) throws Exception {
+        // TODO Auto-generated method stub
+
+    }
+
+}
diff --git a/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreTableDataMssql.java b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreTableDataMssql.java
new file mode 100644
index 0000000..cd0e06a
--- /dev/null
+++ b/src/main/java/ru/fusionsoft/dbgit/mssql/DBRestoreTableDataMssql.java
@@ -0,0 +1,593 @@
+package ru.fusionsoft.dbgit.mssql;
+
+import com.google.common.collect.MapDifference;
+import com.google.common.collect.MapDifference.ValueDifference;
+import com.google.common.collect.Maps;
+import ru.fusionsoft.dbgit.adapters.DBRestoreAdapter;
+import ru.fusionsoft.dbgit.adapters.IDBAdapter;
+import ru.fusionsoft.dbgit.core.ExceptionDBGit;
+import ru.fusionsoft.dbgit.core.ExceptionDBGitRestore;
+import ru.fusionsoft.dbgit.core.GitMetaDataManager;
+import ru.fusionsoft.dbgit.core.SchemaSynonym;
+import ru.fusionsoft.dbgit.data_table.*;
+import ru.fusionsoft.dbgit.dbobjects.DBConstraint;
+import ru.fusionsoft.dbgit.meta.IMetaObject;
+import ru.fusionsoft.dbgit.meta.MetaTable;
+import ru.fusionsoft.dbgit.meta.MetaTableData;
+import ru.fusionsoft.dbgit.statement.PrepareStatementLogging;
+import ru.fusionsoft.dbgit.statement.StatementLogging;
+import ru.fusionsoft.dbgit.utils.ConsoleWriter;
+
+import java.io.*;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.text.SimpleDateFormat;
+import java.util.*;
+
+public class DBRestoreTableDataMssql extends DBRestoreAdapter {
+
+    /**
+     * Restores table data in multiple passes driven by {@code step}:
+     *  step 0  — drop the table's constraints so rows can be changed freely;
+     *  step 1  — snapshot current rows, diff them against the restored data and
+     *            apply the differences;
+     *  step -2 — re-create the constraints.
+     * Returns false while more steps remain, true when the object is done.
+     */
+    @Override
+    public boolean restoreMetaObject(IMetaObject obj, int step) throws Exception {
+        if (obj instanceof MetaTableData) {
+            MetaTableData currentTableData;
+            MetaTableData restoreTableData = (MetaTableData)obj;
+            GitMetaDataManager gitMetaMng = GitMetaDataManager.getInstance();
+            //TODO the cache is not guaranteed to hold this table's meta description —
+            //     it is absent when the table did not exist in the DB at restore start
+
+            IMetaObject currentMetaObj = gitMetaMng.getCacheDBMetaObject(obj.getName());
+
+            if (currentMetaObj instanceof MetaTableData || currentMetaObj == null) {
+
+                // IDIOM FIX: plain int comparisons instead of
+                // Integer.valueOf(step).equals(...) boxing on every call
+                if (step == 0) {
+                    removeTableConstraintsMssql(restoreTableData.getMetaTable());
+                    return false;
+                }
+                if (step == 1) {
+                    String schema = getPhisicalSchema(restoreTableData.getTable().getSchema());
+                    // Honor schema synonyms when present
+                    schema = (SchemaSynonym.getInstance().getSchema(schema) == null) ? schema : SchemaSynonym.getInstance().getSchema(schema);
+
+                    if (currentMetaObj != null) {
+                        currentTableData = (MetaTableData) currentMetaObj;
+                    } else {
+                        // No cached state: synthesize an empty current-state object
+                        currentTableData = new MetaTableData();
+                        currentTableData.setTable(restoreTableData.getTable());
+                        currentTableData.getTable().setSchema(schema);
+
+                        currentTableData.setMapRows(new TreeMapRowData());
+                        currentTableData.setDataTable(restoreTableData.getDataTable());
+                    }
+                    currentTableData.getmapRows().clear();
+
+                    if (getAdapter().getTable(schema, currentTableData.getTable().getName()) != null) {
+                        currentTableData.setDataTable(getAdapter().getTableData(schema, currentTableData.getTable().getName()));
+
+                        // NOTE(review): rs is not closed here — confirm whether the
+                        // DataTable owns and closes the underlying ResultSet.
+                        ResultSet rs = currentTableData.getDataTable().resultSet();
+
+                        TreeMapRowData mapRows = new TreeMapRowData();
+
+                        MetaTable metaTable = new MetaTable(currentTableData.getTable());
+                        metaTable.loadFromDB(currentTableData.getTable());
+
+                        if (rs != null) {
+                            while (rs.next()) {
+                                RowData rd = new RowData(rs, metaTable);
+                                mapRows.put(rd.calcRowKey(metaTable.getIdColumns()), rd);
+                            }
+                        }
+                        currentTableData.setMapRows(mapRows);
+                    }
+                    restoreTableDataMssql(restoreTableData, currentTableData);
+                    return false;
+                }
+                if (step == -2) {
+                    restoreTableConstraintMssql(restoreTableData.getMetaTable());
+                    return false;
+                }
+                return true;
+            }
+            else
+            {
+                throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "metaTypeError").withParams(
+                        obj.getName()
+                        , "table data cached", obj.getType().getValue()
+                ));
+            }
+        }
+        else {
+            throw new ExceptionDBGitRestore(lang.getValue("errors", "restore", "metaTypeError").withParams(
+                    obj.getName()
+                    , "table data", obj.getType().getValue()
+            ));
+        }
+    }
+
+ public void restoreTableDataMssql(MetaTableData restoreTableData, MetaTableData currentTableData) throws Exception{
+ IDBAdapter adapter = getAdapter();
+ Connection connect = adapter.getConnection();
+ StatementLogging st = new StatementLogging(connect, adapter.getStreamOutputSqlCommand(), adapter.isExecSql());
+ try {
+ if (restoreTableData.getmapRows() == null)
+ restoreTableData.setMapRows(new TreeMapRowData());
+
+ String fields = "";
+ if (restoreTableData.getmapRows().size() > 0)
+ fields = keysToString(restoreTableData.getmapRows().firstEntry().getValue().getData().keySet()) + " values ";
+ MapDifference