Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix issue 1920 #1931

Merged
merged 1 commit into from
Aug 15, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 16 additions & 2 deletions src/include/storage/copier/column_chunk.h
Original file line number Diff line number Diff line change
Expand Up @@ -163,11 +163,11 @@ class NullColumnChunk : public BoolColumnChunk {
inline void resetNullBuffer() { memset(buffer.get(), 0 /* non null */, numBytes); }

protected:
uint64_t numBytesForValues(common::offset_t numValues) const {
inline uint64_t numBytesForValues(common::offset_t numValues) const {
// 8 values per byte, and we need a buffer size which is a multiple of 8 bytes
return ceil(numValues / 8.0 / 8.0) * 8;
}
void initialize(common::offset_t numValues) final {
inline void initialize(common::offset_t numValues) final {
numBytesPerValue = 0;
numBytes = numBytesForValues(numValues);
// Each byte defaults to 0, indicating everything is non-null
Expand All @@ -186,6 +186,20 @@ class FixedListColumnChunk : public ColumnChunk {
void write(const common::Value& fixedListVal, uint64_t posToWrite) final;
};

// Column chunk for SERIAL columns. SERIAL values are generated from node
// offsets rather than stored, so this chunk materializes no data and carries
// no null chunk.
class SerialColumnChunk : public ColumnChunk {
public:
    SerialColumnChunk()
        : ColumnChunk{common::LogicalType(common::LogicalTypeID::SERIAL),
              nullptr /* copyDescription */, false /* hasNullChunk */} {}

    inline void initialize(common::offset_t numValues) final {
        numBytesPerValue = 0;
        numBytes = 0;
        // Serial values are derived, not stored: allocate a zero-length
        // buffer so the chunk still owns a valid (non-null) pointer.
        buffer = std::make_unique<uint8_t[]>(numBytes);
    }
};

struct ColumnChunkFactory {
static std::unique_ptr<ColumnChunk> createColumnChunk(
const common::LogicalType& dataType, common::CopyDescription* copyDescription = nullptr);
Expand Down
30 changes: 17 additions & 13 deletions src/storage/copier/column_chunk.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -436,29 +436,33 @@ std::unique_ptr<ColumnChunk> ColumnChunkFactory::createColumnChunk(
const LogicalType& dataType, CopyDescription* copyDescription) {
std::unique_ptr<ColumnChunk> chunk;
switch (dataType.getPhysicalType()) {
case PhysicalTypeID::BOOL:
case PhysicalTypeID::BOOL: {
chunk = std::make_unique<BoolColumnChunk>(copyDescription);
break;
} break;
case PhysicalTypeID::INT64:
case PhysicalTypeID::INT32:
case PhysicalTypeID::INT16:
case PhysicalTypeID::DOUBLE:
case PhysicalTypeID::FLOAT:
case PhysicalTypeID::INTERVAL:
chunk = std::make_unique<ColumnChunk>(dataType, copyDescription);
break;
case PhysicalTypeID::FIXED_LIST:
case PhysicalTypeID::INTERVAL: {
if (dataType.getLogicalTypeID() == LogicalTypeID::SERIAL) {
chunk = std::make_unique<SerialColumnChunk>();
} else {
chunk = std::make_unique<ColumnChunk>(dataType, copyDescription);
}
} break;
case PhysicalTypeID::FIXED_LIST: {
chunk = std::make_unique<FixedListColumnChunk>(dataType, copyDescription);
break;
case PhysicalTypeID::STRING:
} break;
case PhysicalTypeID::STRING: {
chunk = std::make_unique<StringColumnChunk>(dataType, copyDescription);
break;
case PhysicalTypeID::VAR_LIST:
} break;
case PhysicalTypeID::VAR_LIST: {
chunk = std::make_unique<VarListColumnChunk>(dataType, copyDescription);
break;
case PhysicalTypeID::STRUCT:
} break;
case PhysicalTypeID::STRUCT: {
chunk = std::make_unique<StructColumnChunk>(dataType, copyDescription);
break;
} break;
default: {
throw NotImplementedException("ColumnChunkFactory::createColumnChunk for data type " +
LogicalTypeUtils::dataTypeToString(dataType) +
Expand Down
2 changes: 1 addition & 1 deletion src/storage/store/node_column.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -482,7 +482,7 @@ void SerialNodeColumn::lookup(

page_idx_t SerialNodeColumn::append(
    ColumnChunk* columnChunk, page_idx_t startPageIdx, uint64_t nodeGroupIdx) {
    // Serial columns write no physical pages; we only grow the metadata disk
    // array so this node group is tracked. Returns 0 because no pages were
    // consumed starting at startPageIdx.
    metadataDA->resize(nodeGroupIdx + 1);
    return 0;
}

Expand Down
1 change: 1 addition & 0 deletions src/storage/store/node_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@ void NodeTable::insert(Transaction* transaction, ValueVector* nodeIDVector,
column->write(nodeIDVector, propertyVectors[propertyIDToVectorIdx.at(propertyID)]);
}
}
wal->addToUpdatedNodeTables(tableID);
}

void NodeTable::update(
Expand Down
8 changes: 3 additions & 5 deletions src/storage/store/string_node_column.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,7 @@ void StringNodeColumn::scan(transaction::Transaction* transaction, node_group_id
NodeColumn::scan(transaction, nodeGroupIdx, startOffsetInGroup, endOffsetInGroup, resultVector,
offsetInVector);
auto numValuesToRead = endOffsetInGroup - startOffsetInGroup;
auto overflowPageIdx =
overflowMetadataDA->get(nodeGroupIdx, TransactionType::READ_ONLY).pageIdx;
auto overflowPageIdx = overflowMetadataDA->get(nodeGroupIdx, transaction->getType()).pageIdx;
for (auto i = 0u; i < numValuesToRead; i++) {
auto pos = offsetInVector + i;
if (resultVector->isNull(pos)) {
Expand Down Expand Up @@ -82,8 +81,7 @@ void StringNodeColumn::scanInternal(
assert(startNodeOffset % DEFAULT_VECTOR_CAPACITY == 0);
auto nodeGroupIdx = StorageUtils::getNodeGroupIdxFromNodeOffset(startNodeOffset);
NodeColumn::scanInternal(transaction, nodeIDVector, resultVector);
auto overflowPageIdx =
overflowMetadataDA->get(nodeGroupIdx, TransactionType::READ_ONLY).pageIdx;
auto overflowPageIdx = overflowMetadataDA->get(nodeGroupIdx, transaction->getType()).pageIdx;
for (auto i = 0u; i < nodeIDVector->state->selVector->selectedSize; i++) {
auto pos = nodeIDVector->state->selVector->selectedPositions[i];
if (resultVector->isNull(pos)) {
Expand All @@ -100,7 +98,7 @@ void StringNodeColumn::lookupInternal(
auto startNodeOffset = nodeIDVector->readNodeOffset(0);
auto overflowPageIdx = overflowMetadataDA
->get(StorageUtils::getNodeGroupIdxFromNodeOffset(startNodeOffset),
TransactionType::READ_ONLY)
transaction->getType())
.pageIdx;
NodeColumn::lookupInternal(transaction, nodeIDVector, resultVector);
for (auto i = 0u; i < nodeIDVector->state->selVector->selectedSize; i++) {
Expand Down
38 changes: 19 additions & 19 deletions test/test_files/tck/match/match1.test
Original file line number Diff line number Diff line change
Expand Up @@ -10,17 +10,17 @@
Binder exception: No node table exists in database.

# Matching all nodes
#-CASE Scenario2
#-STATEMENT CREATE NODE TABLE A(ID SERIAL, name STRING, PRIMARY KEY(ID));
#---- ok
#-STATEMENT CREATE NODE TABLE B(ID SERIAL, name STRING, PRIMARY KEY(ID));
#---- ok
#-STATEMENT CREATE (:A), (:B {name: 'b'});
#---- ok
#-STATEMENT MATCH (n) RETURN n;
#---- 2
#{_ID: 0:0, _LABEL: A, ID: 0}
#{_ID: 1:0, _LABEL: B, ID: 0, name: b}
-CASE Scenario2
-STATEMENT CREATE NODE TABLE A(ID SERIAL, name STRING, PRIMARY KEY(ID));
---- ok
-STATEMENT CREATE NODE TABLE B(ID SERIAL, name STRING, PRIMARY KEY(ID));
---- ok
-STATEMENT CREATE (:A), (:B {name: 'b'});
---- ok
-STATEMENT MATCH (n) RETURN n;
---- 2
{_ID: 0:0, _LABEL: A, ID: 0}
{_ID: 1:0, _LABEL: B, ID: 0, name: b}

# Matching nodes using multiple labels
-CASE Scenario3
Expand All @@ -38,14 +38,14 @@ Binder exception: No node table exists in database.
{_ID: 1:0, _LABEL: B, ID: 0}

# Simple node inline property predicate
#-CASE Scenario4
#-STATEMENT CREATE NODE TABLE A(ID SERIAL, name STRING, firstName STRING, PRIMARY KEY(ID));
#---- ok
#-STATEMENT CREATE (:A {name: 'bar'}), (:A {name: 'monkey'}), (:A {firstName: 'bar'});
#---- ok
#-STATEMENT MATCH (n {name: 'bar'}) RETURN n;
#---- 1
#{_ID: 0:0, _LABEL: A, ID: 0, name: bar}
-CASE Scenario4
-STATEMENT CREATE NODE TABLE A(ID SERIAL, name STRING, firstName STRING, PRIMARY KEY(ID));
---- ok
-STATEMENT CREATE (:A {name: 'bar'}), (:A {name: 'monkey'}), (:A {firstName: 'bar'});
---- ok
-STATEMENT MATCH (n {name: 'bar'}) RETURN n;
---- 1
{_ID: 0:0, _LABEL: A, ID: 0, name: bar}


# Use multiple MATCH clauses to do a Cartesian product
Expand Down
45 changes: 22 additions & 23 deletions tools/rust_api/src/value.rs
Original file line number Diff line number Diff line change
Expand Up @@ -978,31 +978,30 @@ mod tests {
}

#[test]
/// Tests that SERIAL values pass through the database and are returned
/// correctly: inserted rows receive sequential ids starting at 0, and the
/// column reports the `Serial` logical type.
fn test_serial() -> Result<()> {
    let temp_dir = tempfile::tempdir()?;
    let db = Database::new(temp_dir.path(), 0)?;
    let conn = Connection::new(&db)?;
    conn.query("CREATE NODE TABLE Person(id SERIAL, name STRING, PRIMARY KEY(id));")?;
    conn.query("CREATE (:Person {name: \"Bob\"});")?;
    conn.query("CREATE (:Person {name: \"Alice\"});")?;
    let result = conn.query("MATCH (a:Person) RETURN a.name, a.id;")?;
    assert_eq!(
        result.get_column_data_types(),
        vec![LogicalType::String, LogicalType::Serial]
    );
    // pop() yields columns in reverse order, so each tuple is (id, name).
    let results: Vec<(Value, Value)> = result
        .map(|mut x| (x.pop().unwrap(), x.pop().unwrap()))
        .collect();
    assert_eq!(
        results,
        vec![
            (Value::Int64(0), "Bob".into()),
            (Value::Int64(1), "Alice".into())
        ]
    );
    temp_dir.close()?;
    Ok(())
}
}
Loading