com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement Java Examples
The following examples show how to use
com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement.
The examples are taken from open-source projects; you can go to the original project or source file by following the links above each example.
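Before the project examples, here is a minimal, self-contained sketch of the typical workflow (the class name MySqlUpdateStatementDemo and the sample SQL are illustrative only, not taken from any of the projects below): parse MySQL text with MySqlStatementParser, check whether the result is a MySqlUpdateStatement, and read its table name, SET items, and WHERE clause.

import java.util.List;

import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem;
import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;

public class MySqlUpdateStatementDemo {
    public static void main(String[] args) {
        String sql = "UPDATE hotnews SET title = 'hello' WHERE id = 1";
        // Parse the MySQL text into a Druid AST.
        SQLStatement statement = new MySqlStatementParser(sql).parseStatement();
        if (statement instanceof MySqlUpdateStatement) {
            MySqlUpdateStatement update = (MySqlUpdateStatement) statement;
            // Inspect the parts of the UPDATE that the examples below work with.
            System.out.println("table: " + update.getTableName());
            System.out.println("where: " + update.getWhere());
            List<SQLUpdateSetItem> items = update.getItems();
            for (SQLUpdateSetItem item : items) {
                System.out.println("set " + item.getColumn() + " = " + item.getValue());
            }
        }
    }
}

All of the examples below follow this pattern: they obtain an SQLStatement from a Druid parser and branch on instanceof MySqlUpdateStatement before calling update-specific accessors.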
Example #1
Source File: AutoCompensateHandler.java From txle with Apache License 2.0
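This handler parses the SQL that was just executed, records the data source of the current local transaction, and then dispatches to the insert or update compensation handler depending on whether the statement is a MySqlInsertStatement or a MySqlUpdateStatement.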
@Override
public void prepareCompensationAfterExecuting(PreparedStatement delegate, String executeSql, Map<String, Object> standbyParams) throws SQLException {
    String globalTxId = CurrentThreadOmegaContext.getGlobalTxIdFromCurThread();
    if (globalTxId == null || globalTxId.length() == 0) {
        return;
    }
    String localTxId = CurrentThreadOmegaContext.getLocalTxIdFromCurThread();
    if (localTxId == null || localTxId.length() == 0) {
        return;
    }

    // Parse the SQL with the SQL parser tools from Druid.
    MySqlStatementParser parser = new MySqlStatementParser(executeSql);
    SQLStatement sqlStatement = parser.parseStatement();
    if (sqlStatement instanceof MySqlSelectIntoStatement) {
        return;
    }

    if (standbyParams == null) {
        standbyParams = new HashMap<>(8);
    }

    String server = CurrentThreadOmegaContext.getServiceNameFromCurThread();

    // Record the relationship between localTxId and dataSourceInfo, so that the right dataSource can be chosen for localTxId when it needs to be compensated.
    DatabaseMetaData databaseMetaData = delegate.getConnection().getMetaData();
    String dburl = databaseMetaData.getURL(), dbusername = databaseMetaData.getUserName(), dbdrivername = databaseMetaData.getDriverName();
    DataSourceMappingCache.putLocalTxIdAndDataSourceInfo(localTxId, dburl, dbusername, dbdrivername);

    // To construct the kafka message.
    standbyParams.put("dbdrivername", dbdrivername);
    standbyParams.put("dburl", dburl);
    standbyParams.put("dbusername", dbusername);

    if (sqlStatement instanceof MySqlInsertStatement) {
        AutoCompensateInsertHandler.newInstance().prepareCompensationAfterInserting(delegate, sqlStatement, executeSql, globalTxId, localTxId, server, standbyParams);
    } else if (sqlStatement instanceof MySqlUpdateStatement) {
        AutoCompensateUpdateHandler.newInstance().prepareCompensationAfterUpdating(delegate, sqlStatement, executeSql, globalTxId, localTxId, server, standbyParams);
    }
}
Example #2
Source File: CompensateService.java From txle with Apache License 2.0
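A small helper that extracts the table name from an insert, delete, or update statement; for a MySqlUpdateStatement it simply calls getTableName().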
private String parseTableName(SQLStatement sqlStatement) {
    if (sqlStatement instanceof MySqlInsertStatement) {
        return ((MySqlInsertStatement) sqlStatement).getTableName().toString();
    } else if (sqlStatement instanceof MySqlDeleteStatement) {
        return ((MySqlDeleteStatement) sqlStatement).getTableName().toString();
    } else if (sqlStatement instanceof MySqlUpdateStatement) {
        return ((MySqlUpdateStatement) sqlStatement).getTableName().toString();
    }
    return "";
}
Example #3
Source File: DruidParserFactory.java From Mycat2 with GNU General Public License v3.0
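A factory that maps the concrete statement class to a DruidParser implementation; a MySqlUpdateStatement is handled by DruidUpdateParser.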
public static DruidParser create(SchemaConfig schema, SQLStatement statement, SchemaStatVisitor visitor) {
    DruidParser parser = null;
    if (statement instanceof SQLSelectStatement) {
        if (schema.isNeedSupportMultiDBType()) {
            parser = getDruidParserForMultiDB(schema, statement, visitor);
        }
        if (parser == null) {
            parser = new DruidSelectParser();
        }
    } else if (statement instanceof MySqlInsertStatement) {
        parser = new DruidInsertParser();
    } else if (statement instanceof MySqlDeleteStatement) {
        parser = new DruidDeleteParser();
    } else if (statement instanceof MySqlCreateTableStatement) {
        parser = new DruidCreateTableParser();
    } else if (statement instanceof MySqlUpdateStatement) {
        parser = new DruidUpdateParser();
    } else if (statement instanceof SQLAlterTableStatement) {
        parser = new DruidAlterTableParser();
    } else {
        parser = new DefaultDruidParser();
    }
    return parser;
}
Example #4
Source File: DruidUpdateParserTest.java From Mycat2 with GNU General Public License v3.0
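A test helper that casts the first parsed statement to MySqlUpdateStatement and invokes the private method confirmShardColumnNotUpdated via reflection, asserting that an SQLNonTransientException is raised exactly when the shard column would be updated.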
public void throwExceptionParse(String sql, boolean throwException) throws NoSuchMethodException {
    MySqlStatementParser parser = new MySqlStatementParser(sql);
    List<SQLStatement> statementList = parser.parseStatementList();
    SQLStatement sqlStatement = statementList.get(0);
    MySqlUpdateStatement update = (MySqlUpdateStatement) sqlStatement;
    SchemaConfig schemaConfig = mock(SchemaConfig.class);
    Map<String, TableConfig> tables = mock(Map.class);
    TableConfig tableConfig = mock(TableConfig.class);
    String tableName = "hotnews";
    when((schemaConfig).getTables()).thenReturn(tables);
    when(tables.get(tableName)).thenReturn(tableConfig);
    when(tableConfig.getParentTC()).thenReturn(null);
    RouteResultset routeResultset = new RouteResultset(sql, 11);
    Class c = DruidUpdateParser.class;
    Method method = c.getDeclaredMethod("confirmShardColumnNotUpdated",
            new Class[]{SQLUpdateStatement.class, SchemaConfig.class, String.class, String.class, String.class, RouteResultset.class});
    method.setAccessible(true);
    try {
        method.invoke(c.newInstance(), update, schemaConfig, tableName, "ID", "", routeResultset);
        if (throwException) {
            // "No exception was thrown although one was expected, so the test fails."
            System.out.println("未抛异常,解析通过则不对!");
            Assert.assertTrue(false);
        } else {
            // "Passed without an exception: the shard column may appear in the update statement but is not actually updated."
            System.out.println("未抛异常,解析通过,此情况分片字段可能在update语句中但是实际不会被更新");
            Assert.assertTrue(true);
        }
    } catch (Exception e) {
        if (throwException) {
            System.out.println(e.getCause().getClass());
            Assert.assertTrue(e.getCause() instanceof SQLNonTransientException);
            // "Correct when the cause is SQLNonTransientException."
            System.out.println("抛异常原因为SQLNonTransientException则正确");
        } else {
            // "Unexpected exception: needs checking."
            System.out.println("抛异常,需要检查");
            Assert.assertTrue(false);
        }
    }
}
Example #5
Source File: SchemaUtil.java From dble with GNU General Public License v2.0
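The statement class decides which privilege to check: MySqlUpdateStatement maps to UPDATE, SQLSelectStatement to SELECT, and MySqlDeleteStatement to DELETE; the method then resolves whether the table routes to a single data node.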
private static boolean isNoSharding(ServerConnection source, SQLExprTableSource table, SQLStatement stmt, SQLStatement childSelectStmt,
                                    String contextSchema, Set<String> schemas, StringPtr dataNode) throws SQLException {
    SchemaInfo schemaInfo = SchemaUtil.getSchemaInfo(source.getUser(), contextSchema, table);
    String currentSchema = schemaInfo.schema.toUpperCase();
    if (SchemaUtil.MYSQL_SYS_SCHEMA.contains(currentSchema)) {
        schemas.add(currentSchema);
        return false;
    }
    ServerPrivileges.CheckType checkType = ServerPrivileges.CheckType.SELECT;
    if (childSelectStmt instanceof MySqlUpdateStatement) {
        checkType = ServerPrivileges.CheckType.UPDATE;
    } else if (childSelectStmt instanceof SQLSelectStatement) {
        checkType = ServerPrivileges.CheckType.SELECT;
    } else if (childSelectStmt instanceof MySqlDeleteStatement) {
        checkType = ServerPrivileges.CheckType.DELETE;
    }
    if (!ServerPrivileges.checkPrivilege(source, schemaInfo.schema, schemaInfo.table, checkType)) {
        String msg = "The statement DML privilege check is not passed, sql:" + stmt.toString().replaceAll("[\\t\\n\\r]", " ");
        throw new SQLNonTransientException(msg);
    }
    String noShardingNode = RouterUtil.isNoSharding(schemaInfo.schemaConfig, schemaInfo.table);
    schemas.add(schemaInfo.schema);
    if (noShardingNode == null) {
        return false;
    } else if (dataNode.get() == null) {
        dataNode.set(noShardingNode);
        return true;
    } else {
        return dataNode.get().equals(noShardingNode);
    }
}
Example #6
Source File: DruidUpdateParserTest.java From dble with GNU General Public License v2.0
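The same reflection-based test as Example #4, taken from the dble fork, with English log messages.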
public void throwExceptionParse(String sql, boolean throwException) throws NoSuchMethodException {
    MySqlStatementParser parser = new MySqlStatementParser(sql);
    List<SQLStatement> statementList = parser.parseStatementList();
    SQLStatement sqlStatement = statementList.get(0);
    MySqlUpdateStatement update = (MySqlUpdateStatement) sqlStatement;
    SchemaConfig schemaConfig = mock(SchemaConfig.class);
    Map<String, TableConfig> tables = mock(Map.class);
    TableConfig tableConfig = mock(TableConfig.class);
    String tableName = "hotnews";
    when((schemaConfig).getTables()).thenReturn(tables);
    when(tables.get(tableName)).thenReturn(tableConfig);
    when(tableConfig.getParentTC()).thenReturn(null);
    RouteResultset routeResultset = new RouteResultset(sql, 11);
    Class c = DruidUpdateParser.class;
    Method method = c.getDeclaredMethod("confirmShardColumnNotUpdated",
            new Class[]{SQLUpdateStatement.class, SchemaConfig.class, String.class, String.class, String.class, RouteResultset.class});
    method.setAccessible(true);
    try {
        method.invoke(c.newInstance(), update, schemaConfig, tableName, "ID", "", routeResultset);
        if (throwException) {
            System.out.println("Not passed without exception is not correct");
            Assert.assertTrue(false);
        } else {
            System.out.println("Passed without exception. Maybe the partition key exists in update statement,but not update in fact");
            Assert.assertTrue(true);
        }
    } catch (Exception e) {
        if (throwException) {
            System.out.println(e.getCause().getClass());
            Assert.assertTrue(e.getCause() instanceof SQLNonTransientException);
            System.out.println("SQLNonTransientException is expected");
        } else {
            System.out.println("need checked");
            Assert.assertTrue(false);
        }
    }
}
Example #7
Source File: MySqlUpdateParser.java From baymax with Apache License 2.0
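Rejects an UPDATE that modifies a partition column: the SET items of the MySqlUpdateStatement are compared against the table's configured partition columns.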
/**
 * Checks whether a column may be updated; the partition column of a partitioned table must not be updated.
 */
protected void checkUpdateColumn() {
    MySqlUpdateStatement update = (MySqlUpdateStatement) statement;
    String tableName = StringUtil.removeBackquote(update.getTableName().getSimpleName());
    List<SQLUpdateSetItem> updateSetItem = update.getItems();
    String[] partitionColumns = BaymaxContext.getPartitionColumns(tableName);
    if (partitionColumns != null && partitionColumns.length > 0 && updateSetItem != null && updateSetItem.size() > 0) {
        for (SQLUpdateSetItem item : updateSetItem) {
            String column = StringUtil.removeBackquote(item.getColumn().toString());
            if (StringUtil.contains(partitionColumns, column)) {
                // "The partition column of a partitioned table must not be updated: <table>.<column>"
                throw new BayMaxException("分区表的分区键不能被更新:" + tableName + "." + column);
            }
        }
    }
}
Example #8
Source File: PaserExecutor.java From dts with Apache License 2.0
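Classifies the SQL type for a distributed-transaction statement; for a MySqlUpdateStatement it collects the before-image (commit info) and locks the affected rows.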
public static SQLType parse(StatementAdapter txcStatement) throws SQLException {
    long start = System.currentTimeMillis();
    SQLType sqlType = SQLType.SELECT;
    try {
        DbRuntimeContext txcRuntimeContext = txcStatement.getConnection().getConnectionRuntimeContext();
        String sql = txcStatement.getSql();
        SQLStatement sqlParseStatement = new MySqlStatementParser(sql).parseStatement();
        CommitInfo commitInfo = null;
        if (sqlParseStatement instanceof MySqlUpdateStatement) {
            commitInfo = UpdateParser.getInstance().parse(txcStatement);
            sqlType = SQLType.UPDATE;
            if (!Objects.isNull(commitInfo) && !CollectionUtils.isEmpty(commitInfo.getOriginalValue().getLine())) {
                txcRuntimeContext.getInfo().add(commitInfo);
                fillDbMetaAndLockRow(txcStatement, commitInfo);
            }
        } else if (sqlParseStatement instanceof MySqlInsertStatement) {
            sqlType = SQLType.INSERT;
        } else if (sqlParseStatement instanceof MySqlDeleteStatement) {
            commitInfo = DeleteParser.getInstance().parse(txcStatement);
            sqlType = SQLType.DELETE;
            if (!Objects.isNull(commitInfo) && !CollectionUtils.isEmpty(commitInfo.getOriginalValue().getLine())) {
                txcRuntimeContext.getInfo().add(commitInfo);
                fillDbMetaAndLockRow(txcStatement, commitInfo);
            }
        } else if (sqlParseStatement instanceof SQLSelectStatement) {
            SQLSelectQueryBlock selectQueryBlock = ((SQLSelectStatement) sqlParseStatement).getSelect().getQueryBlock();
            if (selectQueryBlock.getFrom() != null) {
                SelectParser.getInstance().parse(txcStatement);
                sqlType = SQLType.SELECT;
            }
        }
    } catch (Exception e) {
        logger.error("parse sql error", e);
        if (e instanceof SQLException || e instanceof RuntimeException) {
            throw e;
        } else {
            throw new SQLException(e);
        }
    } finally {
        long cost = System.currentTimeMillis() - start;
        if (sqlType != SQLType.SELECT || cost > 50) {
            logger.debug("parser sql:{}, cost:{}ms", txcStatement.getSql(), cost);
        }
    }
    return sqlType;
}
Example #9
Source File: AutoCompensateService.java From txle with Apache License 2.0
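Verifies that the backup rows written for a compensation SQL still match the current table data by comparing MD5 digests; the table name is taken from the parsed update statement.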
private boolean checkDataConsistency(String compensateSql, String globalTxId, String localTxId) throws NoSuchAlgorithmException, IOException {
    MySqlStatementParser parser = new MySqlStatementParser(compensateSql);
    SQLStatement sqlStatement = parser.parseStatement();
    if (sqlStatement instanceof MySqlUpdateStatement || sqlStatement instanceof MySqlInsertStatement) {
        MySqlUpdateStatement deleteStatement = (MySqlUpdateStatement) sqlStatement;
        String tableName = deleteStatement.getTableName().toString().toLowerCase();
        String schema = TxleConstants.APP_NAME;
        String txleBackupTableName = "backup_new_" + tableName;
        int backupDataCount = autoCompensateDao.executeQueryCount("SELECT count(*) FROM " + schema + "." + txleBackupTableName
                + " T WHERE T.globalTxId = ? AND T.localTxId = ? FOR UPDATE", globalTxId, localTxId);
        if (backupDataCount > 0) {
            String pkName = this.parsePrimaryKeyColumnName(tableName);
            int currentDataCount = autoCompensateDao.executeQueryCount("SELECT count(*) FROM " + tableName + " T WHERE T.id IN (SELECT T1.id FROM "
                    + schema + "." + txleBackupTableName + " T1 WHERE T1.globalTxId = ? AND T1.localTxId = ?)", globalTxId, localTxId);
            // If the same data is updated several times, the previous changes are deleted, so there is only one backup row per record.
            if (backupDataCount == currentDataCount) {
                List<Map<String, Object>> columnList = autoCompensateDao.executeQuery(
                        "SELECT GROUP_CONCAT(COLUMN_NAME) COLUMN_NAMES FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '" + schema
                                + "' AND TABLE_NAME = '" + txleBackupTableName + "' AND COLUMN_NAME NOT IN ('globalTxId', 'localTxId')");
                if (columnList != null && !columnList.isEmpty()) {
                    StringBuilder columnNames = new StringBuilder();
                    String[] columnArr = columnList.get(0).get("COLUMN_NAMES").toString().split(",");
                    for (String column : columnArr) {
                        if (columnNames.length() == 0) {
                            columnNames.append("T." + column);
                        } else {
                            columnNames.append(",T." + column);
                        }
                    }
                    String backupDataSql = "SELECT " + columnNames + " FROM " + schema + "." + txleBackupTableName
                            + " T WHERE T.globalTxId = '" + globalTxId + "' AND T.localTxId = '" + localTxId + "'";
                    String currentDataSql = "SELECT " + columnNames + " FROM " + tableName + " T, " + schema + "." + txleBackupTableName
                            + " T1 WHERE T." + pkName + " = T1." + pkName + " AND T1.globalTxId = '" + globalTxId + "' AND T1.localTxId = '" + localTxId + "'";
                    String backupDataMD5 = getMD5Digest(backupDataSql, backupDataCount);
                    String currentDataMD5 = getMD5Digest(currentDataSql, currentDataCount);
                    if (backupDataMD5.equals(currentDataMD5)) {
                        return true;
                    }
                }
            }
        }
        throw new RuntimeException("That's not consistent between backup data and current data.");
    }
    return true;
}
Example #10
Source File: AutoCompensateHandler.java From txle with Apache License 2.0
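The before-execution counterpart of Example #1: update and delete statements are forwarded to handlers that capture before-images, while unsupported statements are either logged or rejected depending on configuration.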
@Override
public void prepareCompensationBeforeExecuting(PreparedStatement delegate, String executeSql, Map<String, Object> standbyParams) throws SQLException {
    String globalTxId = CurrentThreadOmegaContext.getGlobalTxIdFromCurThread();
    if (globalTxId == null || globalTxId.length() == 0) {
        return;
    }
    String localTxId = CurrentThreadOmegaContext.getLocalTxIdFromCurThread();
    if (localTxId == null || localTxId.length() == 0) {
        return;
    }

    // Parse the SQL with the SQL parser tools from Druid.
    MySqlStatementParser parser = new MySqlStatementParser(executeSql);
    SQLStatement sqlStatement = parser.parseStatement();
    if (sqlStatement instanceof MySqlSelectIntoStatement) {
        return;
    }

    if (standbyParams == null) {
        standbyParams = new HashMap<>(8);
    }

    String server = CurrentThreadOmegaContext.getServiceNameFromCurThread();

    // Record the relationship between localTxId and dataSourceInfo, so that the right dataSource can be chosen for localTxId when it needs to be compensated.
    DatabaseMetaData databaseMetaData = delegate.getConnection().getMetaData();
    String dburl = databaseMetaData.getURL(), dbusername = databaseMetaData.getUserName(), dbdrivername = databaseMetaData.getDriverName();
    DataSourceMappingCache.putLocalTxIdAndDataSourceInfo(localTxId, dburl, dbusername, dbdrivername);

    // To construct the kafka message.
    standbyParams.put("dbdrivername", dbdrivername);
    standbyParams.put("dburl", dburl);
    standbyParams.put("dbusername", dbusername);

    if (sqlStatement instanceof MySqlInsertStatement) {
        return;
    } else if (sqlStatement instanceof MySqlUpdateStatement) {
        AutoCompensateUpdateHandler.newInstance().prepareCompensationBeforeUpdating(delegate, sqlStatement, executeSql, globalTxId, localTxId, server, standbyParams);
    } else if (sqlStatement instanceof MySqlDeleteStatement) {
        AutoCompensateDeleteHandler.newInstance().prepareCompensationBeforeDeleting(delegate, sqlStatement, executeSql, globalTxId, localTxId, server, standbyParams);
    } else {
        standbyParams.clear();
        // The switch is closed by default, which means the SQL is only recorded; if it is open, the program throws an exception for the unsupported SQL, just for auto-compensation.
        boolean checkSpecialSql = TxleStaticConfig.getBooleanConfig("txle.transaction.auto-compensation.check-special-sql", false);
        if (checkSpecialSql) {
            throw new SQLException(TxleConstants.logErrorPrefixWithTime() + "Do not support sql [" + executeSql + "] to auto-compensation.");
        } else {
            LOG.debug(TxleConstants.logDebugPrefixWithTime() + "Do not support sql [{}] to auto-compensation, but it has been executed due to the switch 'checkSpecialSql' is closed.", executeSql);
        }
    }
}
Example #11
Source File: CompensateService.java From txle with Apache License 2.0
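Builds the backup SQL around each sub-transaction's business SQL; for a MySqlUpdateStatement the WHERE clause of the parsed statement is reused to snapshot the matched rows both before and after the update.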
public void prepareBackupSql(TxleTransactionStart tx, TxleTxStartAck.Builder txStartAck, boolean isExistsGlobalTx, Map<String, String> localTxBackupSql) {
    for (TxleSubTransactionStart subTx : tx.getSubTxInfoList()) {
        try {
            SQLStatement sqlStatement = new MySqlStatementParser(subTx.getSql()).parseStatement();
            String tableName = parseTableName(sqlStatement);
            String tableNameWithSchema = tableName;
            if (tableName.indexOf(".") < 0) {
                tableNameWithSchema = subTx.getDbSchema() + "." + tableName;
            } else {
                tableName = tableName.substring(tableName.indexOf(".") + 1);
            }
            String txleOldBackupTableName = TxleConstants.giveBackupTableNameForOldData(subTx.getDbSchema(), tableName), txleOldBackupTableNameWithSchema = this.schema + "." + txleOldBackupTableName;
            String txleNewBackupTableName = TxleConstants.giveBackupTableNameForNewData(subTx.getDbSchema(), tableName), txleNewBackupTableNameWithSchema = this.schema + "." + txleNewBackupTableName;

            // create backup table & alter structure
            TxleSubTxSql.Builder subTxSql = TxleSubTxSql.newBuilder().setLocalTxId(subTx.getLocalTxId()).setDbNodeId(subTx.getDbNodeId()).setDbSchema(subTx.getDbSchema()).setOrder(subTx.getOrder());
            this.constructBackupSqls(tx, isExistsGlobalTx, subTx, subTxSql, tableNameWithSchema, txleOldBackupTableName, txleOldBackupTableNameWithSchema, txleNewBackupTableName, txleNewBackupTableNameWithSchema);

            String operation = "";
            if (sqlStatement instanceof MySqlInsertStatement) {
                operation = "insert";
                // the formal business sql
                subTxSql.addSubTxSql(subTx.getSql());
                // the backup new data sql
                Object primaryKey = this.txleEhCache.get(TxleCacheType.INIT, subTx.getDbNodeId() + "." + tableNameWithSchema);
                if (primaryKey == null) {
                    primaryKey = "id";
                }
                subTxSql.addSubTxSql(String.format("INSERT INTO " + txleNewBackupTableNameWithSchema + " SELECT *, '%s', '%s' FROM %s WHERE " + primaryKey + " = (SELECT LAST_INSERT_ID()) FOR UPDATE " + TxleConstants.ACTION_SQL, tx.getGlobalTxId(), subTx.getLocalTxId(), tableNameWithSchema));
            } else if (sqlStatement instanceof MySqlDeleteStatement) {
                operation = "delete";
                subTxSql.addSubTxSql(String.format("INSERT INTO " + txleOldBackupTableNameWithSchema + " SELECT *, '%s', '%s' FROM %s WHERE %s FOR UPDATE " + TxleConstants.ACTION_SQL, tx.getGlobalTxId(), subTx.getLocalTxId(), tableNameWithSchema, ((MySqlDeleteStatement) sqlStatement).getWhere().toString()));
                subTxSql.addSubTxSql(subTx.getSql());
            } else if (sqlStatement instanceof MySqlUpdateStatement) {
                operation = "update";
                subTxSql.addSubTxSql(String.format("INSERT INTO " + txleOldBackupTableNameWithSchema + " SELECT *, '%s', '%s' FROM %s WHERE %s FOR UPDATE " + TxleConstants.ACTION_SQL, tx.getGlobalTxId(), subTx.getLocalTxId(), tableNameWithSchema, ((MySqlUpdateStatement) sqlStatement).getWhere().toString()));
                subTxSql.addSubTxSql(subTx.getSql());
                subTxSql.addSubTxSql(String.format("INSERT INTO " + txleNewBackupTableNameWithSchema + " SELECT *, '%s', '%s' FROM %s WHERE %s FOR UPDATE " + TxleConstants.ACTION_SQL, tx.getGlobalTxId(), subTx.getLocalTxId(), tableNameWithSchema, ((MySqlUpdateStatement) sqlStatement).getWhere().toString()));
            }

            if (!txleEhCache.readConfigCache(TxleConstants.getServiceInstanceId(tx.getServiceName(), tx.getServiceIP()), tx.getServiceCategory(), ConfigCenterType.ClientCompensate)) {
                subTxSql.addSubTxSql("set autocommit=0");
                subTxSql.addSubTxSql("commit");
                subTxSql.addSubTxSql("set autocommit=1");
                // subTxSql.addSubTxSql("begin");
            }

            // return SQLs to client
            txStartAck.addSubTxSql(subTxSql.build());

            final StringBuilder backupSqls = new StringBuilder();
            subTxSql.getSubTxSqlList().forEach(sql -> backupSqls.append(sql + TxleConstants.STRING_SEPARATOR));
            localTxBackupSql.put(subTx.getLocalTxId(), backupSqls.toString());

            kafkaMessageRepository.save(new KafkaMessage(tx.getGlobalTxId(), subTx.getLocalTxId(), "", subTx.getDbNodeId(), "", tableNameWithSchema, operation, ""));
        } catch (Exception e) {
            handleExceptionWithFaultToleranceChecking("Failed to prepare sqls for backup.", e, tx, txStartAck);
        }
    }
}
Example #12
Source File: CompensateService.java From txle with Apache License 2.0
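Builds the reverse (compensation) SQL: a DELETE for inserts, an INSERT from the old-data backup table for deletes, and for a MySqlUpdateStatement an UPDATE that joins the table against its old-data backup on the primary key.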
public void constructCompensateSql(TxleTransactionStart tx, TxleTxStartAck.Builder txStartAck, Map<String, String> localTxCompensateSql) {
    for (TxleSubTransactionStart subTx : tx.getSubTxInfoList()) {
        try {
            SQLStatement sqlStatement = new MySqlStatementParser(subTx.getSql()).parseStatement();
            String tableName = parseTableName(sqlStatement);
            String tableNameWithSchema = tableName;
            if (tableName.indexOf(".") < 0) {
                tableNameWithSchema = subTx.getDbSchema() + "." + tableName;
            } else {
                tableName = tableName.substring(tableName.indexOf(".") + 1);
            }
            String txleOldBackupTableName = TxleConstants.giveBackupTableNameForOldData(subTx.getDbSchema(), tableName);
            String txleNewBackupTableName = this.schema + "." + TxleConstants.giveBackupTableNameForNewData(subTx.getDbSchema(), tableName);

            String compensateSql = "";
            if (sqlStatement instanceof MySqlInsertStatement) {
                compensateSql = String.format("DELETE FROM " + tableNameWithSchema + " WHERE id IN (SELECT id FROM " + txleNewBackupTableName + " WHERE globalTxId = '%s' AND localTxId = '%s') " + TxleConstants.ACTION_SQL, tx.getGlobalTxId(), subTx.getLocalTxId());
            } else if (sqlStatement instanceof MySqlDeleteStatement) {
                compensateSql = String.format("INSERT INTO " + tableNameWithSchema + " SELECT %s FROM " + schema + "." + txleOldBackupTableName + " WHERE globalTxId = '%s' AND localTxId = '%s' " + TxleConstants.ACTION_SQL, this.readColumnNames(subTx.getDbSchema(), tableName), tx.getGlobalTxId(), subTx.getLocalTxId());
            } else if (sqlStatement instanceof MySqlUpdateStatement) {
                String setColumns = this.constructSetColumnsForUpdate(subTx.getDbSchema(), tableName);
                Object primaryKey = this.txleEhCache.get(TxleCacheType.INIT, subTx.getDbNodeId() + "." + tableNameWithSchema);
                if (primaryKey == null) {
                    List list = customRepository.executeQuery("SELECT T.field FROM BusinessDBLatestDetail T WHERE T.isprimarykey = 1 AND T.node = ? AND T.dbschema = ? AND T.tablename = ?", subTx.getDbNodeId(), subTx.getDbSchema(), tableName);
                    if (list != null && !list.isEmpty()) {
                        primaryKey = list.get(0);
                        if (primaryKey != null) {
                            txleEhCache.put(TxleCacheType.INIT, subTx.getDbNodeId() + "." + subTx.getDbSchema() + "." + tableName, primaryKey);
                        }
                    }
                }
                if (primaryKey == null) {
                    primaryKey = "id";
                }

                // construct reversed sql
                compensateSql = String.format("UPDATE %s T INNER JOIN %s T1 ON T." + primaryKey + " = T1." + primaryKey + " SET %s WHERE T1.globalTxId = '%s' AND T1.localTxId = '%s' " + TxleConstants.ACTION_SQL, tableNameWithSchema, this.schema + "." + txleOldBackupTableName, setColumns, tx.getGlobalTxId(), subTx.getLocalTxId());
            }

            localTxCompensateSql.put(subTx.getLocalTxId(), compensateSql);
        } catch (Exception e) {
            handleExceptionWithFaultToleranceChecking("Failed to construct sql for compensation.", e, tx, txStartAck);
        }
    }
}
Example #13
Source File: DruidUpdateParser.java From Mycat2 with GNU General Public License v3.0
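Mycat's routing logic for UPDATE: it rejects multi-table sharded updates, routes non-sharded and global tables directly, verifies that the shard column itself is not updated, and clears the table's primary-key cache.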
@Override
public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) throws SQLNonTransientException {
    // Only one sharded table may be involved in an update.
    if (ctx.getTables() != null && getUpdateTableCount() > 1 && !schema.isNoSharding()) {
        String msg = "multi table related update not supported,tables:" + ctx.getTables();
        LOGGER.warn(msg);
        throw new SQLNonTransientException(msg);
    }
    MySqlUpdateStatement update = (MySqlUpdateStatement) stmt;
    String tableName = StringUtil.removeBackquote(update.getTableName().getSimpleName().toUpperCase());

    TableConfig tc = schema.getTables().get(tableName);

    if (RouterUtil.isNoSharding(schema, tableName)) {
        // The whole schema is not sharded, or this table is not split.
        RouterUtil.routeForTableMeta(rrs, schema, tableName, rrs.getStatement());
        rrs.setFinishedRoute(true);
        return;
    }

    String partitionColumn = tc.getPartitionColumn();
    String joinKey = tc.getJoinKey();
    if (tc.isGlobalTable() || (partitionColumn == null && joinKey == null)) {
        // Updating a global table affects rows on every node.
        RouterUtil.routeToMultiNode(false, rrs, tc.getDataNodes(), rrs.getStatement(), tc.isGlobalTable());
        rrs.setFinishedRoute(true);
        return;
    }

    confirmShardColumnNotUpdated(update, schema, tableName, partitionColumn, joinKey, rrs);

    // if (ctx.getTablesAndConditions().size() > 0) {
    //     Map<String, Set<ColumnRoutePair>> map = ctx.getTablesAndConditions().get(tableName);
    //     if (map != null) {
    //         for (Map.Entry<String, Set<ColumnRoutePair>> entry : map.entrySet()) {
    //             String column = entry.getKey();
    //             Set<ColumnRoutePair> value = entry.getValue();
    //             if (column.toUpperCase().equals(anObject))
    //         }
    //     }
    // }
    // System.out.println();

    if (schema.getTables().get(tableName).isGlobalTable() && ctx.getRouteCalculateUnit().getTablesAndConditions().size() > 1) {
        throw new SQLNonTransientException("global table is not supported in multi table related update " + tableName);
    }

    // Clear the primary-key cache of this table while parsing the SQL.
    TableConfig tableConfig = schema.getTables().get(tableName);
    if (tableConfig != null && !tableConfig.primaryKeyIsPartionKey()) {
        String cacheName = schema.getName() + "_" + tableName;
        cacheName = cacheName.toUpperCase();
        for (CachePool value : MycatServer.getInstance().getCacheService().getAllCachePools().values()) {
            value.clearCache(cacheName);
            value.getCacheStatic().reset();
        }
    }
}
Example #14
Source File: ElasticSqlSelectParser.java From elasticsearch-sql with Apache License 2.0
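A hand-written parser method that builds a MySqlUpdateStatement directly from the lexer: it consumes optional modifiers such as LOW_PRIORITY and IGNORE, hints, the table source, the SET items, and the optional WHERE, ORDER BY, and LIMIT clauses.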
protected MySqlUpdateStatement parseUpdateStatment() {
    MySqlUpdateStatement update = new MySqlUpdateStatement();
    lexer.nextToken();

    if (lexer.identifierEquals(FnvHash.Constants.LOW_PRIORITY)) {
        lexer.nextToken();
        update.setLowPriority(true);
    }

    if (lexer.identifierEquals(FnvHash.Constants.IGNORE)) {
        lexer.nextToken();
        update.setIgnore(true);
    }

    if (lexer.identifierEquals(FnvHash.Constants.COMMIT_ON_SUCCESS)) {
        lexer.nextToken();
        update.setCommitOnSuccess(true);
    }

    if (lexer.identifierEquals(FnvHash.Constants.ROLLBACK_ON_FAIL)) {
        lexer.nextToken();
        update.setRollBackOnFail(true);
    }

    if (lexer.identifierEquals(FnvHash.Constants.QUEUE_ON_PK)) {
        lexer.nextToken();
        update.setQueryOnPk(true);
    }

    if (lexer.identifierEquals(FnvHash.Constants.TARGET_AFFECT_ROW)) {
        lexer.nextToken();
        SQLExpr targetAffectRow = this.exprParser.expr();
        update.setTargetAffectRow(targetAffectRow);
    }

    if (lexer.identifierEquals(FnvHash.Constants.FORCE)) {
        lexer.nextToken();
        if (lexer.token() == Token.ALL) {
            lexer.nextToken();
            acceptIdentifier("PARTITIONS");
            update.setForceAllPartitions(true);
        } else if (lexer.identifierEquals(FnvHash.Constants.PARTITIONS)) {
            lexer.nextToken();
            update.setForceAllPartitions(true);
        } else if (lexer.token() == Token.PARTITION) {
            lexer.nextToken();
            SQLName partition = this.exprParser.name();
            update.setForcePartition(partition);
        } else {
            throw new ParserException("TODO. " + lexer.info());
        }
    }

    while (lexer.token() == Token.HINT) {
        this.exprParser.parseHints(update.getHints());
    }

    SQLSelectParser selectParser = this.exprParser.createSelectParser();
    SQLTableSource updateTableSource = selectParser.parseTableSource();
    update.setTableSource(updateTableSource);

    accept(Token.SET);

    for (;;) {
        SQLUpdateSetItem item = this.exprParser.parseUpdateSetItem();
        update.addItem(item);
        if (lexer.token() != Token.COMMA) {
            break;
        }
        lexer.nextToken();
    }

    if (lexer.token() == (Token.WHERE)) {
        lexer.nextToken();
        update.setWhere(this.exprParser.expr());
    }

    update.setOrderBy(this.exprParser.parseOrderBy());
    update.setLimit(this.exprParser.parseLimit());

    return update;
}