Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2026-04-08 10:11:30 -06:00).
Merge of commit 27436002c7 into a6b43651ca.
This commit is contained in:
commit
b773d504ab
1
migrations/mysql/2026-03-09-005927_add_archives/down.sql
Normal file
1
migrations/mysql/2026-03-09-005927_add_archives/down.sql
Normal file
@ -0,0 +1 @@
|
||||
-- Revert the archives feature: drop the per-user cipher archive table.
-- IF EXISTS keeps the rollback idempotent (mirrors the guard in up.sql),
-- so re-running the down migration after a partial failure does not error.
DROP TABLE IF EXISTS archives;
|
||||
10
migrations/mysql/2026-03-09-005927_add_archives/up.sql
Normal file
10
migrations/mysql/2026-03-09-005927_add_archives/up.sql
Normal file
@ -0,0 +1,10 @@
|
||||
-- Archives: records which ciphers each user has archived, and when.
-- The guard below makes the migration safe to re-run after a partial failure.
DROP TABLE IF EXISTS archives;

CREATE TABLE archives (
    user_uuid   CHAR(36)  NOT NULL,
    cipher_uuid CHAR(36)  NOT NULL,
    archived_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- At most one archive entry per (user, cipher) pair.
    PRIMARY KEY (user_uuid, cipher_uuid),
    -- Remove archive rows automatically when the user or cipher is deleted.
    FOREIGN KEY (user_uuid) REFERENCES users (uuid) ON DELETE CASCADE,
    FOREIGN KEY (cipher_uuid) REFERENCES ciphers (uuid) ON DELETE CASCADE
);
|
||||
@ -0,0 +1 @@
|
||||
-- Revert the archives feature: drop the per-user cipher archive table.
-- IF EXISTS keeps the rollback idempotent (mirrors the guard in up.sql).
DROP TABLE IF EXISTS archives;
|
||||
@ -0,0 +1,8 @@
|
||||
-- Archives: records which ciphers each user has archived, and when.
-- The guard below makes the migration safe to re-run after a partial failure.
DROP TABLE IF EXISTS archives;

CREATE TABLE archives (
    -- Archive rows are removed automatically when the user or cipher goes away.
    user_uuid   CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
    archived_at TIMESTAMP NOT NULL DEFAULT now(),
    -- At most one archive entry per (user, cipher) pair.
    PRIMARY KEY (user_uuid, cipher_uuid)
);
|
||||
@ -0,0 +1 @@
|
||||
-- Revert the archives feature: drop the per-user cipher archive table.
-- IF EXISTS keeps the rollback idempotent (mirrors the guard in up.sql).
DROP TABLE IF EXISTS archives;
|
||||
8
migrations/sqlite/2026-03-09-005927-add_archives/up.sql
Normal file
8
migrations/sqlite/2026-03-09-005927-add_archives/up.sql
Normal file
@ -0,0 +1,8 @@
|
||||
-- Archives: records which ciphers each user has archived, and when.
-- The guard below makes the migration safe to re-run after a partial failure.
DROP TABLE IF EXISTS archives;

CREATE TABLE archives (
    -- NOTE(review): SQLite only enforces these cascades when the connection
    -- enables foreign keys (PRAGMA foreign_keys = ON) — confirm the pool does.
    user_uuid   CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
    archived_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- At most one archive entry per (user, cipher) pair.
    PRIMARY KEY (user_uuid, cipher_uuid)
);
|
||||
@ -19,9 +19,9 @@ use crate::{
|
||||
crypto,
|
||||
db::{
|
||||
models::{
|
||||
Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId,
|
||||
CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType,
|
||||
OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
|
||||
Archive, Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup,
|
||||
CollectionId, CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership,
|
||||
MembershipType, OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
|
||||
},
|
||||
DbConn, DbPool,
|
||||
},
|
||||
@ -96,6 +96,10 @@ pub fn routes() -> Vec<Route> {
|
||||
post_collections_update,
|
||||
post_collections_admin,
|
||||
put_collections_admin,
|
||||
archive_cipher_put,
|
||||
archive_cipher_selected,
|
||||
unarchive_cipher_put,
|
||||
unarchive_cipher_selected,
|
||||
]
|
||||
}
|
||||
|
||||
@ -293,6 +297,7 @@ pub struct CipherData {
|
||||
// when using older client versions, or if the operation doesn't involve
|
||||
// updating an existing cipher.
|
||||
last_known_revision_date: Option<String>,
|
||||
archived_date: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@ -533,6 +538,17 @@ pub async fn update_cipher_from_data(
|
||||
cipher.save(conn).await?;
|
||||
cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?;
|
||||
cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?;
|
||||
let archived_at = match data.archived_date {
|
||||
Some(dt_str) => match NaiveDateTime::parse_from_str(&dt_str, "%+") {
|
||||
Ok(dt) => Some(dt),
|
||||
Err(err) => {
|
||||
warn!("Error parsing ArchivedDate '{dt_str}': {err}");
|
||||
None
|
||||
}
|
||||
},
|
||||
None => None,
|
||||
};
|
||||
cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?;
|
||||
|
||||
if ut != UpdateType::None {
|
||||
// Only log events for organizational ciphers
|
||||
@ -1715,6 +1731,36 @@ async fn purge_personal_vault(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// `PUT /ciphers/<cipher_id>/archive`
///
/// Marks a single cipher as archived for the current user.
/// Delegates with `archived = true` and `multi_archive = false`, so a
/// per-cipher update notification is sent to the user's other clients.
#[put("/ciphers/<cipher_id>/archive")]
async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
    set_archived_cipher_by_uuid(&cipher_id, &headers, true, false, &conn, &nt).await
}
|
||||
|
||||
/// `PUT /ciphers/archive`
///
/// Archives every cipher id in the request body for the current user.
/// The batch helper sends one aggregate sync instead of per-cipher pushes.
#[put("/ciphers/archive", data = "<data>")]
async fn archive_cipher_selected(
    data: Json<CipherIdsData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    set_archived_multiple_ciphers(data, &headers, true, &conn, &nt).await
}
|
||||
|
||||
/// `PUT /ciphers/<cipher_id>/unarchive`
///
/// Clears the archived state of a single cipher for the current user.
/// Delegates with `archived = false` and `multi_archive = false`.
#[put("/ciphers/<cipher_id>/unarchive")]
async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
    set_archived_cipher_by_uuid(&cipher_id, &headers, false, false, &conn, &nt).await
}
|
||||
|
||||
/// `PUT /ciphers/unarchive`
///
/// Unarchives every cipher id in the request body for the current user.
/// The batch helper sends one aggregate sync instead of per-cipher pushes.
#[put("/ciphers/unarchive", data = "<data>")]
async fn unarchive_cipher_selected(
    data: Json<CipherIdsData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    set_archived_multiple_ciphers(data, &headers, false, &conn, &nt).await
}
|
||||
|
||||
#[derive(PartialEq)]
|
||||
pub enum CipherDeleteOptions {
|
||||
SoftSingle,
|
||||
@ -1933,6 +1979,71 @@ async fn _delete_cipher_attachment_by_id(
|
||||
Ok(Json(json!({"cipher":cipher_json})))
|
||||
}
|
||||
|
||||
async fn set_archived_cipher_by_uuid(
|
||||
cipher_id: &CipherId,
|
||||
headers: &Headers,
|
||||
archived: bool,
|
||||
multi_archive: bool,
|
||||
conn: &DbConn,
|
||||
nt: &Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
|
||||
err!("Cipher doesn't exist")
|
||||
};
|
||||
|
||||
if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
|
||||
err!("Cipher is not accessible for the current user")
|
||||
}
|
||||
|
||||
let archived_at = if archived {
|
||||
Some(Utc::now().naive_utc())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?;
|
||||
|
||||
if !multi_archive {
|
||||
nt.send_cipher_update(
|
||||
UpdateType::SyncCipherUpdate,
|
||||
&cipher,
|
||||
&cipher.update_users_revision(conn).await,
|
||||
&headers.device,
|
||||
None,
|
||||
conn,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
|
||||
}
|
||||
|
||||
async fn set_archived_multiple_ciphers(
|
||||
data: Json<CipherIdsData>,
|
||||
headers: &Headers,
|
||||
archived: bool,
|
||||
conn: &DbConn,
|
||||
nt: &Notify<'_>,
|
||||
) -> JsonResult {
|
||||
let data = data.into_inner();
|
||||
|
||||
let mut ciphers: Vec<Value> = Vec::new();
|
||||
for cipher_id in data.ids {
|
||||
match set_archived_cipher_by_uuid(&cipher_id, headers, archived, true, conn, nt).await {
|
||||
Ok(json) => ciphers.push(json.into_inner()),
|
||||
err => return err,
|
||||
}
|
||||
}
|
||||
|
||||
// Multi archive actions do not send out a push for each cipher, we need to send a general sync here
|
||||
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;
|
||||
|
||||
Ok(Json(json!({
|
||||
"data": ciphers,
|
||||
"object": "list",
|
||||
"continuationToken": null
|
||||
})))
|
||||
}
|
||||
|
||||
/// This will hold all the necessary data to improve a full sync of all the ciphers
|
||||
/// It can be used during the `Cipher::to_json()` call.
|
||||
/// It will prevent the so called N+1 SQL issue by running just a few queries which will hold all the data needed.
|
||||
@ -1942,6 +2053,7 @@ pub struct CipherSyncData {
|
||||
pub cipher_folders: HashMap<CipherId, FolderId>,
|
||||
pub cipher_favorites: HashSet<CipherId>,
|
||||
pub cipher_collections: HashMap<CipherId, Vec<CollectionId>>,
|
||||
pub cipher_archives: HashMap<CipherId, NaiveDateTime>,
|
||||
pub members: HashMap<OrganizationId, Membership>,
|
||||
pub user_collections: HashMap<CollectionId, CollectionUser>,
|
||||
pub user_collections_groups: HashMap<CollectionId, CollectionGroup>,
|
||||
@ -1958,6 +2070,7 @@ impl CipherSyncData {
|
||||
pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self {
|
||||
let cipher_folders: HashMap<CipherId, FolderId>;
|
||||
let cipher_favorites: HashSet<CipherId>;
|
||||
let cipher_archives: HashMap<CipherId, NaiveDateTime>;
|
||||
match sync_type {
|
||||
// User Sync supports Folders and Favorites
|
||||
CipherSyncType::User => {
|
||||
@ -1966,12 +2079,16 @@ impl CipherSyncData {
|
||||
|
||||
// Generate a HashSet of all the Cipher UUID's which are marked as favorite
|
||||
cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect();
|
||||
|
||||
// Generate a HashMap with the Cipher UUID as key and the archived date time as value
|
||||
cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect();
|
||||
}
|
||||
// Organization Sync does not support Folders and Favorites.
|
||||
// If these are set, it will cause issues in the web-vault.
|
||||
CipherSyncType::Organization => {
|
||||
cipher_folders = HashMap::with_capacity(0);
|
||||
cipher_favorites = HashSet::with_capacity(0);
|
||||
cipher_archives = HashMap::with_capacity(0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2034,6 +2151,7 @@ impl CipherSyncData {
|
||||
};
|
||||
|
||||
Self {
|
||||
cipher_archives,
|
||||
cipher_attachments,
|
||||
cipher_folders,
|
||||
cipher_favorites,
|
||||
|
||||
@ -204,11 +204,11 @@ fn config() -> Json<Value> {
|
||||
// Client (v2026.2.1): https://github.com/bitwarden/clients/blob/f96380c3138291a028bdd2c7a5fee540d5c98ba5/libs/common/src/enums/feature-flag.enum.ts#L12
|
||||
// Android (v2026.2.1): https://github.com/bitwarden/android/blob/6902c19c0093fa476bbf74ccaa70c9f14afbb82f/core/src/main/kotlin/com/bitwarden/core/data/manager/model/FlagKey.kt#L31
|
||||
// iOS (v2026.2.1): https://github.com/bitwarden/ios/blob/cdd9ba1770ca2ffc098d02d12cc3208e3a830454/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7
|
||||
let feature_states = parse_experimental_client_feature_flags(
|
||||
let mut feature_states = parse_experimental_client_feature_flags(
|
||||
&CONFIG.experimental_client_feature_flags(),
|
||||
FeatureFlagFilter::ValidOnly,
|
||||
);
|
||||
// Add default feature_states here if needed, currently no features are needed by default.
|
||||
feature_states.insert("pm-19148-innovation-archive".to_string(), true);
|
||||
|
||||
Json(json!({
|
||||
// Note: The clients use this version to handle backwards compatibility concerns
|
||||
|
||||
97
src/db/models/archive.rs
Normal file
97
src/db/models/archive.rs
Normal file
@ -0,0 +1,97 @@
|
||||
use chrono::NaiveDateTime;
|
||||
use diesel::prelude::*;
|
||||
|
||||
use super::{CipherId, User, UserId};
|
||||
use crate::api::EmptyResult;
|
||||
use crate::db::schema::archives;
|
||||
use crate::db::DbConn;
|
||||
use crate::error::MapResult;
|
||||
|
||||
// One row per (user, cipher) pair: the user has archived this cipher.
// Field order must match the `archives` table column order for `Queryable`.
#[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = archives)]
#[diesel(primary_key(user_uuid, cipher_uuid))]
pub struct Archive {
    pub user_uuid: UserId,
    pub cipher_uuid: CipherId,
    // When the cipher was archived; set via `Utc::now().naive_utc()` by the
    // API, or parsed from the client-supplied ArchivedDate.
    pub archived_at: NaiveDateTime,
}
|
||||
|
||||
impl Archive {
|
||||
// Returns the date the specified cipher was archived
|
||||
pub async fn get_archived_at(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
|
||||
db_run! { conn: {
|
||||
archives::table
|
||||
.filter(archives::cipher_uuid.eq(cipher_uuid))
|
||||
.filter(archives::user_uuid.eq(user_uuid))
|
||||
.select(archives::archived_at)
|
||||
.first::<NaiveDateTime>(conn).ok()
|
||||
}}
|
||||
}
|
||||
|
||||
// Sets the specified cipher to be archived or unarchived
|
||||
pub async fn set_archived_at(
|
||||
archived_at: Option<NaiveDateTime>,
|
||||
cipher_uuid: &CipherId,
|
||||
user_uuid: &UserId,
|
||||
conn: &DbConn,
|
||||
) -> EmptyResult {
|
||||
let existing = Self::get_archived_at(cipher_uuid, user_uuid, conn).await;
|
||||
|
||||
match (existing, archived_at) {
|
||||
// Not archived - archive at the provided timestamp
|
||||
(None, Some(dt)) => {
|
||||
User::update_uuid_revision(user_uuid, conn).await;
|
||||
db_run! { conn: {
|
||||
diesel::insert_into(archives::table)
|
||||
.values((
|
||||
archives::user_uuid.eq(user_uuid),
|
||||
archives::cipher_uuid.eq(cipher_uuid),
|
||||
archives::archived_at.eq(dt),
|
||||
))
|
||||
.execute(conn)
|
||||
.map_res("Error archiving")
|
||||
}}
|
||||
}
|
||||
// Already archived - update with the provided timestamp
|
||||
(Some(_), Some(dt)) => {
|
||||
User::update_uuid_revision(user_uuid, conn).await;
|
||||
db_run! { conn: {
|
||||
diesel::update(
|
||||
archives::table
|
||||
.filter(archives::user_uuid.eq(user_uuid))
|
||||
.filter(archives::cipher_uuid.eq(cipher_uuid))
|
||||
)
|
||||
.set(archives::archived_at.eq(dt))
|
||||
.execute(conn)
|
||||
.map_res("Error updating archive date")
|
||||
}}
|
||||
}
|
||||
(Some(_), None) => {
|
||||
User::update_uuid_revision(user_uuid, conn).await;
|
||||
db_run! { conn: {
|
||||
diesel::delete(
|
||||
archives::table
|
||||
.filter(archives::user_uuid.eq(user_uuid))
|
||||
.filter(archives::cipher_uuid.eq(cipher_uuid))
|
||||
)
|
||||
.execute(conn)
|
||||
.map_res("Error unarchiving")
|
||||
}}
|
||||
}
|
||||
// Otherwise, the archived status is already what it should be
|
||||
_ => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a vec with (cipher_uuid, archived_at)
|
||||
/// This is used during a full sync so we only need one query for all archive matches
|
||||
pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> {
|
||||
db_run! { conn: {
|
||||
archives::table
|
||||
.filter(archives::user_uuid.eq(user_uuid))
|
||||
.select((archives::cipher_uuid, archives::archived_at))
|
||||
.load::<(CipherId, NaiveDateTime)>(conn)
|
||||
.unwrap_or_default()
|
||||
}}
|
||||
}
|
||||
}
|
||||
@ -10,8 +10,8 @@ use diesel::prelude::*;
|
||||
use serde_json::Value;
|
||||
|
||||
use super::{
|
||||
Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus,
|
||||
MembershipType, OrganizationId, User, UserId,
|
||||
Archive, Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership,
|
||||
MembershipStatus, MembershipType, OrganizationId, User, UserId,
|
||||
};
|
||||
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
|
||||
use macros::UuidFromParam;
|
||||
@ -380,6 +380,11 @@ impl Cipher {
|
||||
} else {
|
||||
self.is_favorite(user_uuid, conn).await
|
||||
});
|
||||
json_object["archivedDate"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
|
||||
cipher_sync_data.cipher_archives.get(&self.uuid).map_or(Value::Null, |d| Value::String(format_date(d)))
|
||||
} else {
|
||||
self.get_archived_at(user_uuid, conn).await.map_or(Value::Null, |d| Value::String(format_date(&d)))
|
||||
});
|
||||
// These values are true by default, but can be false if the
|
||||
// cipher belongs to a collection or group where the org owner has enabled
|
||||
// the "Read Only" or "Hide Passwords" restrictions for the user.
|
||||
@ -742,6 +747,19 @@ impl Cipher {
|
||||
}
|
||||
}
|
||||
|
||||
// Returns when this cipher was archived by `user_uuid`, or `None` if it is
// not archived for that user. Thin delegation to the `Archive` model.
pub async fn get_archived_at(&self, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
    Archive::get_archived_at(&self.uuid, user_uuid, conn).await
}
|
||||
|
||||
// Sets (`Some(dt)`) or clears (`None`) this cipher's archived state for
// `user_uuid`. Thin delegation to the `Archive` model.
pub async fn set_archived_at(
    &self,
    archived_at: Option<NaiveDateTime>,
    user_uuid: &UserId,
    conn: &DbConn,
) -> EmptyResult {
    Archive::set_archived_at(archived_at, &self.uuid, user_uuid, conn).await
}
|
||||
|
||||
pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> {
|
||||
db_run! { conn: {
|
||||
folders_ciphers::table
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
mod archive;
|
||||
mod attachment;
|
||||
mod auth_request;
|
||||
mod cipher;
|
||||
@ -17,6 +18,7 @@ mod two_factor_duo_context;
|
||||
mod two_factor_incomplete;
|
||||
mod user;
|
||||
|
||||
pub use self::archive::Archive;
|
||||
pub use self::attachment::{Attachment, AttachmentId};
|
||||
pub use self::auth_request::{AuthRequest, AuthRequestId};
|
||||
pub use self::cipher::{Cipher, CipherId, RepromptType};
|
||||
|
||||
@ -341,6 +341,16 @@ table! {
|
||||
}
|
||||
}
|
||||
|
||||
// Per-user cipher archive state; composite primary key (user_uuid, cipher_uuid).
table! {
    archives (user_uuid, cipher_uuid) {
        user_uuid -> Text,
        cipher_uuid -> Text,
        archived_at -> Timestamp,
    }
}

// Allow diesel joins from archives to its two parent tables.
joinable!(archives -> users (user_uuid));
joinable!(archives -> ciphers (cipher_uuid));
|
||||
joinable!(attachments -> ciphers (cipher_uuid));
|
||||
joinable!(ciphers -> organizations (organization_uuid));
|
||||
joinable!(ciphers -> users (user_uuid));
|
||||
@ -372,6 +382,7 @@ joinable!(auth_requests -> users (user_uuid));
|
||||
joinable!(sso_users -> users (user_uuid));
|
||||
|
||||
allow_tables_to_appear_in_same_query!(
|
||||
archives,
|
||||
attachments,
|
||||
ciphers,
|
||||
ciphers_collections,
|
||||
|
||||
Loading…
Reference in New Issue
Block a user