lib/collection/tests/integration/continuous_snapshot_test.rs (+52 −2)

@@ -9,16 +9,21 @@
 use collection::operations::point_ops::{
     PointInsertOperationsInternal, PointOperations, PointStructPersisted, VectorStructPersisted,
     WriteOrdering,
 };
+use collection::operations::shard_selector_internal::ShardSelectorInternal;
 use collection::operations::shared_storage_config::SharedStorageConfig;
-use collection::operations::types::{CollectionResult, NodeType, UpdateStatus, VectorsConfig};
+use collection::operations::types::{
+    CollectionResult, NodeType, PointRequestInternal, UpdateStatus, VectorsConfig,
+};
 use collection::operations::vector_params_builder::VectorParamsBuilder;
 use collection::shards::channel_service::ChannelService;
 use collection::shards::collection_shard_distribution::CollectionShardDistribution;
 use collection::shards::replica_set::ReplicaState;
 use common::budget::ResourceBudget;
 use common::counter::hardware_accumulator::HwMeasurementAcc;
 use common::flags::{FeatureFlags, init_feature_flags};
-use segment::types::Distance;
+use segment::payload_json;
+use segment::types::{Distance, WithVector};
+use shard::operations::payload_ops::{PayloadOps, SetPayloadOp};
 use tempfile::Builder;
 use tokio::time::sleep;
@@ -142,6 +147,51 @@ async fn test_continuous_snapshot() {
                 .await?;
             assert_eq!(insert.status, UpdateStatus::Completed);
         }
+
+        // Retrieve one point at a time
+        for i in 0..points_count {
+            let retrieve_point = PointRequestInternal {
+                ids: vec![i.into()],
+                with_payload: None,
+                with_vector: WithVector::Bool(false),
+            };
+            let hw_counter = HwMeasurementAcc::disposable();
+            let retrieve_result = collection
+                .retrieve(
+                    retrieve_point,
+                    None,
+                    &ShardSelectorInternal::All,
+                    None,
+                    hw_counter,
+                )
+                .await?;
+            assert_eq!(retrieve_result.len(), 1);
+        }
+
+        // Set payload one point at a time
+        for i in 0..points_count {
+            let set_payload = CollectionUpdateOperations::PayloadOperation(
+                PayloadOps::SetPayload(SetPayloadOp {
+                    payload: payload_json! {
+                        "city": "London",
+                        "color": "green",
+                    },
+                    points: Some(vec![i.into()]),
+                    filter: None,
+                    key: None,
+                }),
+            );
+            let hw_counter = HwMeasurementAcc::disposable();
+            let set_result = collection
+                .update_from_client_simple(
+                    set_payload,
+                    true,
+                    WriteOrdering::default(),
+                    hw_counter,
+                )
+                .await?;
+            assert_eq!(set_result.status, UpdateStatus::Completed);
+        }
     }
     CollectionResult::Ok(())
 })
lib/shard/src/proxy_segment/mod.rs (+4 −1)

@@ -55,7 +55,10 @@ impl ProxySegment {
                 let already_deleted = raw_segment_guard.get_deleted_points_bitvec();
                 Some(already_deleted)
             }
-            LockedSegment::Proxy(_) => None,
+            LockedSegment::Proxy(_) => {
+                log::debug!("Double proxy segment creation");
+                None
+            }
         };
         let wrapped_config = segment.get().read().config().clone();
         ProxySegment {

Review thread on the log::debug! line:

Member: Shall we demote this to trace?

Member (author): I'd like to keep it this way because double proxies are mysterious beasts. Seeing it in the log is a clear sign that we are dealing with a complex situation like snapshot + optimizer.
lib/shard/src/segment_holder/mod.rs (+14 −6)

@@ -969,6 +969,8 @@ impl SegmentHolder {
         log::trace!("Applying function on all proxied shard segments");
         let mut result = Ok(());
         let mut unproxied_segment_ids = Vec::with_capacity(proxies.len());
+        // Reverse to unproxify non-appendable segments first
+        proxies.reverse();
         for (segment_id, proxy_segment) in &proxies {
             // Get segment to snapshot
             let op_result = match proxy_segment {

Review comment from @agourlay (Member, author), Sep 12, 2025, on proxies.reverse():

I do not have a rock-solid rationale for this change, but it is necessary for the fix below to work properly.

@@ -997,13 +999,19 @@
             }
 
             // Try to unproxy/release this segment since we don't use it anymore
-            // Unproxying now lets us release the segment earlier, prevent unnecessary writes to the temporary segment
-            match Self::try_unproxy_segment(segments_lock, *segment_id, proxy_segment.clone()) {
-                Ok(lock) => {
-                    segments_lock = lock;
-                    unproxied_segment_ids.push(*segment_id);
-                }
-                Err(lock) => segments_lock = lock,
-            }
+            // Unproxying now lets us release the segment earlier, preventing unnecessary writes to the temporary segment.
+            // Make sure to keep at least one proxy segment to maintain access to the points in the shared write segment.
+            // The last proxy and the shared write segment will be promoted into the segment_holder atomically
+            // by `Self::unproxy_all_segments` afterwards to maintain read consistency.
+            let remaining = proxies.len() - unproxied_segment_ids.len();
+            if remaining > 1 {
+                match Self::try_unproxy_segment(segments_lock, *segment_id, proxy_segment.clone()) {
+                    Ok(lock) => {
+                        segments_lock = lock;
+                        unproxied_segment_ids.push(*segment_id);
+                    }
+                    Err(lock) => segments_lock = lock,
+                }
+            }
         }
         proxies.retain(|(id, _)| !unproxied_segment_ids.contains(id));
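To make the new guard easier to reason about, here is a minimal, runnable sketch of the "keep at least one proxy" rule. Everything in it (plain u32 segment ids, the release_all_but_last helper) is a hypothetical stand-in, not the actual SegmentHolder API:

// Minimal sketch of the eager-unproxy guard above, using plain u32
// segment ids as hypothetical stand-ins for the real proxy segments.
fn release_all_but_last(proxies: &mut Vec<u32>, unproxied_segment_ids: &mut Vec<u32>) {
    // Mirrors the reversed iteration: non-appendable segments come first.
    for segment_id in proxies.clone() {
        // Proxies still registered, minus those already released.
        let remaining = proxies.len() - unproxied_segment_ids.len();
        // Release early only while more than one proxy remains; the last
        // proxy keeps the shared write segment readable until both are
        // promoted atomically by the final unproxy step.
        if remaining > 1 {
            unproxied_segment_ids.push(segment_id);
        }
    }
    proxies.retain(|id| !unproxied_segment_ids.contains(id));
}

fn main() {
    let mut proxies = vec![3, 2, 1]; // already reversed
    let mut released = Vec::new();
    release_all_but_last(&mut proxies, &mut released);
    assert_eq!(released, vec![3, 2]);
    assert_eq!(proxies, vec![1]); // exactly one proxy survives for promotion
}

Run against three reversed proxies, the sketch releases the first two eagerly and leaves exactly one behind, which is the invariant the guarded match above preserves.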
lib/shard/src/update.rs (+4 −0)

@@ -291,6 +291,10 @@ fn upsert_with_payload(
     } else {
         res &= segment.clear_payload(op_num, point_id, hw_counter)?;
     }
+    debug_assert!(
+        segment.has_point(point_id),
+        "the point {point_id} should be present immediately after the upsert"
+    );
     Ok(res)
 }
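To show what the new assertion protects, here is a condensed, self-contained sketch of the surrounding flow. The Segment trait and InMemorySegment are hypothetical stand-ins for the real segment entry API, which takes extra arguments (op_num, hw_counter) omitted here:

// Condensed sketch of upsert_with_payload's invariant; `Segment` is a
// hypothetical stand-in, not the real segment entry trait.
use std::collections::HashMap;

trait Segment {
    fn upsert_point(&mut self, point_id: u64) -> bool;
    fn set_payload(&mut self, point_id: u64, payload: &str) -> bool;
    fn clear_payload(&mut self, point_id: u64) -> bool;
    fn has_point(&self, point_id: u64) -> bool;
}

fn upsert_with_payload(segment: &mut dyn Segment, point_id: u64, payload: Option<&str>) -> bool {
    let mut res = segment.upsert_point(point_id);
    res &= match payload {
        Some(p) => segment.set_payload(point_id, p),
        None => segment.clear_payload(point_id),
    };
    // A write that was routed away from this segment (e.g. through a
    // misbehaving proxy) would leave the point invisible here and trip
    // this check in debug builds; release builds skip it.
    debug_assert!(
        segment.has_point(point_id),
        "the point {point_id} should be present immediately after the upsert"
    );
    res
}

struct InMemorySegment {
    points: HashMap<u64, Option<String>>,
}

impl Segment for InMemorySegment {
    fn upsert_point(&mut self, point_id: u64) -> bool {
        self.points.entry(point_id).or_insert(None);
        true
    }
    fn set_payload(&mut self, point_id: u64, payload: &str) -> bool {
        self.points.insert(point_id, Some(payload.to_string()));
        true
    }
    fn clear_payload(&mut self, point_id: u64) -> bool {
        self.points.insert(point_id, None);
        true
    }
    fn has_point(&self, point_id: u64) -> bool {
        self.points.contains_key(&point_id)
    }
}

fn main() {
    let mut seg = InMemorySegment { points: HashMap::new() };
    assert!(upsert_with_payload(&mut seg, 42, Some("{\"city\": \"London\"}")));
}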