[chore](show partitions) show partitions print commit version (#28274)
## Proposed changes

Issue Number: close #xxx
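
This change adds a `CommittedVersion` column to the `SHOW PARTITIONS` output built by `PartitionsProcDir`, and updates the regression tests to read result columns by name via `sql_return_maparray` instead of by position. Below is a minimal sketch of the name-based access pattern the tests move to; the suite name and the table `example_tbl` are placeholders, not part of this commit:

```groovy
// Hypothetical sketch only: example_tbl is assumed to already exist with at least one partition.
suite("show_partitions_committed_version_sketch") {
    def partitions = sql_return_maparray "show partitions from example_tbl"
    for (def p : partitions) {
        // Columns are addressed by header name (PartitionName, Buckets, CommittedVersion, ...)
        // rather than by position such as get(8), so assertions stay readable.
        logger.info("partition=${p.PartitionName}, buckets=${p.Buckets}, committedVersion=${p.CommittedVersion}")
        // An empty partition is assumed to report a committed version of at least 1.
        assertTrue(p.CommittedVersion.toLong() >= 1)
    }
}
```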
yujun777 authored and dataroaring committed Aug 11, 2024
1 parent 9986fbc commit 33feda0
Showing 8 changed files with 45 additions and 41 deletions.
@@ -75,7 +75,7 @@ public class PartitionsProcDir implements ProcDirInterface {
.add("State").add("PartitionKey").add("Range").add("DistributionKey")
.add("Buckets").add("ReplicationNum").add("StorageMedium").add("CooldownTime").add("RemoteStoragePolicy")
.add("LastConsistencyCheckTime").add("DataSize").add("IsInMemory").add("ReplicaAllocation")
.add("IsMutable").add("SyncWithBaseTables").add("UnsyncTables")
.add("IsMutable").add("SyncWithBaseTables").add("UnsyncTables").add("CommittedVersion")
.build();

private Database db;
@@ -380,6 +380,9 @@ private List<Pair<List<Comparable>, TRow>> getPartitionInfosInrernal() throws An
trow.addToColumnValue(new TCell().setStringVal(FeConstants.null_string));
}

+ partitionInfo.add(partition.getCommittedVersion());
+ trow.addToColumnValue(new TCell().setLongVal(partition.getCommittedVersion()));

partitionInfos.add(Pair.of(partitionInfo, trow));
}
} finally {
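With the committed version exposed through `SHOW PARTITIONS`, a follow-up test could check that the value advances once a load transaction commits against the partition. A hedged sketch under that assumption; the table `example_tbl`, its schema, and the strictly increasing behavior are illustrative rather than taken from this commit:

```groovy
// Hypothetical sketch: the partition's committed version is assumed to grow after a committed load.
def before = sql_return_maparray "show partitions from example_tbl"
def v0 = before.get(0).CommittedVersion.toLong()

// Any committed load should do; a plain INSERT is used here for brevity (schema assumed).
sql "insert into example_tbl values (1, 'a')"

def after = sql_return_maparray "show partitions from example_tbl"
def v1 = after.get(0).CommittedVersion.toLong()
assertTrue(v1 > v0, "expected committed version to advance: ${v0} -> ${v1}")
```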
12 changes: 6 additions & 6 deletions regression-test/suites/autobucket/test_autobucket.groovy
@@ -33,11 +33,11 @@ suite("test_autobucket") {
log.info("show result : ${result}")
assertTrue(result.toString().containsIgnoreCase("BUCKETS AUTO"))

result = sql "show partitions from autobucket_test"
result = sql_return_maparray "show partitions from autobucket_test"
logger.info("${result}")
// XXX: buckets at pos(8), next maybe impl by sql meta
// 10 is the default buckets without partition size
- assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
+ assertEquals(10, Integer.valueOf(result.get(0).Buckets))

sql "drop table if exists autobucket_test"

@@ -57,10 +57,10 @@ suite("test_autobucket") {
)
"""

result = sql "show partitions from autobucket_test_min_buckets"
result = sql_return_maparray "show partitions from autobucket_test_min_buckets"
logger.info("${result}")
// XXX: buckets at pos(8), next maybe impl by sql meta
- assertEquals(Integer.valueOf(result.get(0).get(8)), 5)
+ assertEquals(5, Integer.valueOf(result.get(0).Buckets))
// set back to default
sql "ADMIN SET FRONTEND CONFIG ('autobucket_min_buckets' = '1')"
sql "drop table if exists autobucket_test_min_buckets"
@@ -81,10 +81,10 @@
)
"""

result = sql "show partitions from autobucket_test_max_buckets"
result = sql_return_maparray "show partitions from autobucket_test_max_buckets"
logger.info("${result}")
// XXX: buckets at pos(8), next maybe impl by sql meta
- assertEquals(Integer.valueOf(result.get(0).get(8)), 1) //equals max bucket
+ assertEquals(1, Integer.valueOf(result.get(0).Buckets)) //equals max bucket
// set back to default
sql "ADMIN SET FRONTEND CONFIG ('autobucket_max_buckets' = '128')"
sql "drop table if exists autobucket_test_max_buckets"
@@ -36,14 +36,14 @@ suite("test_autobucket_dynamic_partition") {
log.info("show result : ${result}")
assertTrue(result.toString().containsIgnoreCase("BUCKETS AUTO"))

result = sql "show partitions from test_autobucket_dynamic_partition"
result = sql_return_maparray "show partitions from test_autobucket_dynamic_partition"
logger.info("${result}")
// XXX: buckets at pos(8), next maybe impl by sql meta
// 10 is the default buckets without partition size
assertEquals(result.size(), 3)
- assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
- assertEquals(Integer.valueOf(result.get(1).get(8)), 10)
- assertEquals(Integer.valueOf(result.get(2).get(8)), 10)
+ for (def partition : result) {
+     assertEquals(Integer.valueOf(partition.Buckets), 10)
+ }

sql "drop table if exists test_autobucket_dynamic_partition"
}
@@ -130,12 +130,12 @@ suite("add_table_policy_by_modify_partition") {
"""

// Test that the partition's specified policy would be covered by the table's policy
- def partitions = sql """
+ def partitions = sql_return_maparray """
show partitions from create_table_partion_use_created_policy_test
"""

- for (par in partitions) {
-     assertTrue(par[12] == "created_create_table_partition_alter_policy")
+ for (def par in partitions) {
+     assertTrue(par.RemoteStoragePolicy == "created_create_table_partition_alter_policy")
}

sql """
@@ -32,12 +32,12 @@ suite("test_dynamic_partition") {
"dynamic_partition.create_history_partition"="true",
"dynamic_partition.replication_allocation" = "tag.location.default: 1")
"""
- List<List<Object>> result = sql "show tables like 'dy_par'"
+ def result = sql "show tables like 'dy_par'"
logger.info("${result}")
assertEquals(result.size(), 1)
result = sql "show partitions from dy_par"
result = sql_return_maparray "show partitions from dy_par"
// XXX: buckets at pos(8), next maybe impl by sql meta
- assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
+ assertEquals(result.get(0).Buckets.toInteger(), 10)
sql "drop table dy_par"

sql "drop table if exists dy_par"
@@ -59,9 +59,9 @@ suite("test_dynamic_partition") {
result = sql "show tables like 'dy_par'"
logger.info("${result}")
assertEquals(result.size(), 1)
result = sql "show partitions from dy_par"
result = sql_return_maparray "show partitions from dy_par"
// XXX: buckets at pos(8), next maybe impl by sql meta
- assertEquals(Integer.valueOf(result.get(0).get(8)), 10)
+ assertEquals(result.get(0).Buckets.toInteger(), 10)
sql "drop table dy_par"

sql "drop table if exists dy_par_bucket_set_by_distribution"
@@ -83,9 +83,9 @@ suite("test_dynamic_partition") {
result = sql "show tables like 'dy_par_bucket_set_by_distribution'"
logger.info("${result}")
assertEquals(result.size(), 1)
result = sql "show partitions from dy_par_bucket_set_by_distribution"
result = sql_return_maparray "show partitions from dy_par_bucket_set_by_distribution"
// XXX: buckets at pos(8), next maybe impl by sql meta
- assertEquals(Integer.valueOf(result.get(0).get(8)), 3)
+ assertEquals(result.get(0).Buckets.toInteger(), 3)
sql "drop table dy_par_bucket_set_by_distribution"
sql "drop table if exists dy_par_bad"
def isCloudMode = {
@@ -16,9 +16,10 @@
// under the License.

suite("test_dynamic_partition_with_rename") {
sql "drop table if exists test_dynamic_partition_with_rename"
def tbl = 'test_dynamic_partition_with_rename'
sql "drop table if exists ${tbl}"
sql """
- CREATE TABLE IF NOT EXISTS test_dynamic_partition_with_rename
+ CREATE TABLE IF NOT EXISTS ${tbl}
( k1 date NOT NULL, k2 varchar(20) NOT NULL, k3 int sum NOT NULL )
AGGREGATE KEY(k1,k2)
PARTITION BY RANGE(k1) ( )
@@ -33,26 +34,26 @@ suite("test_dynamic_partition_with_rename") {
"dynamic_partition.create_history_partition"="true",
"dynamic_partition.replication_allocation" = "tag.location.default: 1")
"""
- def result = sql "show partitions from test_dynamic_partition_with_rename"
+ def result = sql_return_maparray "show partitions from ${tbl}"
assertEquals(7, result.size())

// rename distributed column, then try to add too more dynamic partition
sql "alter table test_dynamic_partition_with_rename rename column k1 renamed_k1"
sql """ ADMIN SET FRONTEND CONFIG ('dynamic_partition_check_interval_seconds' = '1') """
sql """ alter table test_dynamic_partition_with_rename set('dynamic_partition.end'='5') """
result = sql "show partitions from test_dynamic_partition_with_rename"
sql """ alter table ${tbl} set('dynamic_partition.end'='5') """
result = sql_return_maparray "show partitions from ${tbl}"
for (def retry = 0; retry < 120; retry++) { // at most wait 120s
if (result.size() == 9) {
break;
}
logger.info("wait dynamic partition scheduler, sleep 1s")
sleep(1000); // sleep 1s
result = sql "show partitions from test_dynamic_partition_with_rename"
result = sql_return_maparray "show partitions from ${tbl}"
}
assertEquals(9, result.size())
for (def line = 0; line < result.size(); line++) {
// XXX: DistributionKey at pos(7), next maybe impl by sql meta
assertEquals("renamed_k1", result.get(line).get(7))
assertEquals("renamed_k1", result.get(line).DistributionKey)
}

sql "drop table test_dynamic_partition_with_rename"
@@ -51,7 +51,7 @@ suite("test_multi_partition_key", "p0") {
sql "select * from ${tableName} order by k1, k2"
resultFile "partition_table.out"
}
def result = sql "SHOW PARTITIONS FROM ${tableName}"
def result = sql_return_maparray "SHOW PARTITIONS FROM ${tableName}"
assertTrue(result.size() > 1)
if (ifDropTbl) {
try_sql """DROP TABLE ${tableName}"""
@@ -139,8 +139,8 @@ suite("test_multi_partition_key", "p0") {
false
)
// expect partition_f range: [ [126, 126] ~ [500, -128] )
def ret = sql "SHOW PARTITIONS FROM test_default_minvalue WHERE PartitionName='partition_f'"
assertTrue(ret[0][6].contains("[500, -128]"))
def ret = sql_return_maparray "SHOW PARTITIONS FROM test_default_minvalue WHERE PartitionName='partition_f'"
assertTrue(ret[0].Range.contains("[500, -128]"))

// partition columns error
test {
@@ -221,8 +221,8 @@ suite("test_multi_partition_key", "p0") {
}

sql "ALTER TABLE test_multi_col_test_partition_add ADD PARTITION partition_add VALUES LESS THAN ('30', '1000') "
- def ret_add_p = sql "SHOW PARTITIONS FROM test_multi_col_test_partition_add WHERE PartitionName='partition_add'"
- assertTrue(ret[0][6].contains("[500, -128]"))
+ def ret_add_p = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_partition_add WHERE PartitionName='partition_add'"
+ assertTrue(ret[0].Range.contains("[500, -128]"))
test {
sql "ALTER TABLE test_multi_col_test_partition_add ADD PARTITION add_partition_wrong " +
"VALUES LESS THAN ('30', '800') DISTRIBUTED BY hash(k1) BUCKETS 5"
@@ -243,11 +243,11 @@
false
)
sql "ALTER TABLE test_multi_col_test_partition_drop DROP PARTITION partition_d"
- def ret_drop_p = sql "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_d'"
+ def ret_drop_p = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_d'"
assertEquals(0, ret_drop_p.size())
sql "ALTER TABLE test_multi_col_test_partition_drop ADD PARTITION partition_dd VALUES LESS THAN ('0','0') "
- ret_drop_p = sql "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_dd'"
- assertTrue(ret_drop_p[0][6].contains("[0, 0]"))
+ ret_drop_p = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_partition_drop WHERE PartitionName='partition_dd'"
+ assertTrue(ret_drop_p[0].Range.contains("[0, 0]"))
// null value in the lowest partition, if drop the partition null is deleted.
sql """drop table if exists test_multi_col_test_partition_null_value"""
sql """
@@ -366,8 +366,8 @@ suite("test_multi_partition_key", "p0") {
if (!isCloudMode()) {
sql "ALTER TABLE test_multi_col_test_rollup MODIFY PARTITION partition_a SET( 'replication_num' = '1')"
}
ret = sql "SHOW PARTITIONS FROM test_multi_col_test_rollup WHERE PartitionName='partition_a'"
assertEquals('1', ret[0][9])
ret = sql_return_maparray "SHOW PARTITIONS FROM test_multi_col_test_rollup WHERE PartitionName='partition_a'"
assertEquals(1, ret[0].ReplicationNum as int)
// create table with range partition
testPartitionTbl(
"test_multi_column_fixed_range_1",
@@ -393,7 +393,7 @@
)
// add partition with range
sql "ALTER TABLE test_multi_column_fixed_range_1 ADD PARTITION partition_add VALUES LESS THAN ('50','1000') "
ret = sql "SHOW PARTITIONS FROM test_multi_column_fixed_range_1 WHERE PartitionName='partition_add'"
ret = sql_return_maparray "SHOW PARTITIONS FROM test_multi_column_fixed_range_1 WHERE PartitionName='partition_add'"
assertEquals(1, ret.size(), )
test {
sql "ALTER TABLE test_multi_column_fixed_range_1 ADD PARTITION add_partition_wrong VALUES LESS THAN ('50','800')"
@@ -211,10 +211,10 @@ suite("multi_thread_load", "p1,nonConcurrent") { // stress case should use resou
def row_count_range = sql """select count() from ${table_name};"""
assertTrue(data_count*rows == row_count_range[0][0], "${data_count*rows}, ${row_count_range[0][0]}")
// check there's no intersect in partitions
- def partition_res_range = sql """show partitions from ${table_name} order by PartitionName;"""
+ def partition_res_range = sql_return_maparray """show partitions from ${table_name} order by PartitionName;"""
for (int i = 0; i < partition_res_range.size(); i++) {
for (int j = i+1; j < partition_res_range.size(); j++) {
- if (partition_res_range[i][6] == partition_res_range[j][6]) {
+ if (partition_res_range[i].Range == partition_res_range[j].Range) {
assertTrue(false, "$i, $j")
}
}
