@@ -70,13 +70,13 @@ class ClassificationEngine:
         if disk:
             cursor.execute("""
                 SELECT path, checksum
-                FROM files_bak
-                WHERE disk = %s AND category IS NULL
+                FROM files
+                WHERE disk_label = %s AND category IS NULL
             """, (disk,))
         else:
             cursor.execute("""
                 SELECT path, checksum
-                FROM files_bak
+                FROM files
                 WHERE category IS NULL
             """)
@@ -149,7 +149,7 @@ class ClassificationEngine:
         from psycopg2.extras import execute_batch

         query = """
-            UPDATE files_bak
+            UPDATE files
             SET category = %s
             WHERE path = %s
         """
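For context on the hunk above: `execute_batch` from `psycopg2.extras` sends many parameter tuples per network round trip, which is why it is imported next to this UPDATE. A minimal standalone sketch — the connection details and the `classifications` data are invented for illustration, only the query mirrors the diff:

```python
import psycopg2
from psycopg2.extras import execute_batch

# Hypothetical (category, path) pairs produced by a classifier.
classifications = [
    ("video", "/data/movie.mkv"),
    ("document", "/data/report.pdf"),
]

conn = psycopg2.connect(dbname="disk_reorganizer_db")  # connection assumed
with conn, conn.cursor() as cursor:
    query = """
        UPDATE files
        SET category = %s
        WHERE path = %s
    """
    # Groups many UPDATEs per round trip; page_size controls batch size.
    execute_batch(cursor, query, classifications, page_size=500)
```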
@@ -188,7 +188,7 @@ class ClassificationEngine:
                 category,
                 COUNT(*) as file_count,
                 SUM(size) as total_size
-            FROM files_bak
+            FROM files
             WHERE category IS NOT NULL
             GROUP BY category
             ORDER BY total_size DESC
@@ -214,7 +214,7 @@ class ClassificationEngine:
         conn = self._get_connection()
         cursor = conn.cursor()

-        cursor.execute("SELECT COUNT(*) FROM files_bak WHERE category IS NULL")
+        cursor.execute("SELECT COUNT(*) FROM files WHERE category IS NULL")
         count = cursor.fetchone()[0]

         cursor.close()
@@ -241,7 +241,7 @@ class ClassificationEngine:
         cursor = conn.cursor()

         cursor.execute("""
-            UPDATE files_bak
+            UPDATE files
             SET category = %s
            WHERE category = %s
        """, (new_category, old_category))
@@ -278,7 +278,7 @@ class ClassificationEngine:
         # Get categorized files
         cursor.execute("""
             SELECT path, category
-            FROM files_bak
+            FROM files
             WHERE category IS NOT NULL
         """)
@@ -326,7 +326,7 @@ class ClassificationEngine:

         cursor.execute("""
             SELECT DISTINCT category
-            FROM files_bak
+            FROM files
             WHERE category IS NOT NULL
             ORDER BY category
         """)
@@ -241,7 +241,7 @@ def train_from_database(
    cursor = db_connection.cursor()
    cursor.execute("""
        SELECT path, category
-        FROM files_bak
+        FROM files
        WHERE category IS NOT NULL
    """)
@@ -70,17 +70,17 @@ class DeduplicationEngine:
         if disk:
             cursor.execute("""
                 SELECT path, size
-                FROM files_bak
-                WHERE disk = %s AND checksum IS NULL
+                FROM files
+                WHERE disk_label = %s AND checksum IS NULL
                 ORDER BY size DESC
             """, (disk,))
         else:
             cursor.execute("""
                 SELECT path, size
-                FROM files_bak
+                FROM files
                 WHERE checksum IS NULL
                 ORDER BY size DESC
             """)

         files_to_process = cursor.fetchall()
         total_files = len(files_to_process)
@@ -108,10 +108,10 @@ class DeduplicationEngine:
             if checksum:
                 # Update database
                 cursor.execute("""
-                    UPDATE files_bak
+                    UPDATE files
                     SET checksum = %s, duplicate_of = %s
                     WHERE path = %s
                 """, (checksum, duplicate_of, str(path)))

                 stats.files_succeeded += 1
                 stats.bytes_processed += size
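The checksum written here is computed elsewhere in the engine; for reference, a minimal sketch of how such a file checksum is typically produced with the standard library — the function name, algorithm, and chunk size are assumptions, not taken from this commit:

```python
import hashlib
from pathlib import Path

def compute_checksum(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash a file in 1 MiB chunks so large files never sit fully in memory."""
    digest = hashlib.sha256()
    with path.open('rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
```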
@@ -225,19 +225,19 @@ class DeduplicationEngine:
         if disk:
             cursor.execute("""
                 SELECT checksum, array_agg(path ORDER BY path) as paths
-                FROM files_bak
-                WHERE disk = %s AND checksum IS NOT NULL
+                FROM files
+                WHERE disk_label = %s AND checksum IS NOT NULL
                 GROUP BY checksum
                 HAVING COUNT(*) > 1
             """, (disk,))
         else:
             cursor.execute("""
                 SELECT checksum, array_agg(path ORDER BY path) as paths
-                FROM files_bak
+                FROM files
                 WHERE checksum IS NOT NULL
                 GROUP BY checksum
                 HAVING COUNT(*) > 1
             """)

         duplicates = {}
         for checksum, paths in cursor.fetchall():
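The `array_agg` / `HAVING COUNT(*) > 1` pattern returns one row per duplicated checksum, carrying every path in that group. A self-contained sketch of consuming it — connection details assumed, query taken from the hunk above:

```python
import psycopg2

conn = psycopg2.connect(dbname="disk_reorganizer_db")  # connection assumed
with conn, conn.cursor() as cursor:
    cursor.execute("""
        SELECT checksum, array_agg(path ORDER BY path) AS paths
        FROM files
        WHERE checksum IS NOT NULL
        GROUP BY checksum
        HAVING COUNT(*) > 1
    """)
    # psycopg2 maps a Postgres array to a Python list, so each row is
    # (checksum, [path, path, ...]); ORDER BY path makes the first path
    # a stable choice of canonical copy.
    duplicates = {checksum: paths for checksum, paths in cursor.fetchall()}
```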
@@ -262,18 +262,18 @@ class DeduplicationEngine:
         stats = {}

         # Total files
-        cursor.execute("SELECT COUNT(*) FROM files_bak WHERE checksum IS NOT NULL")
+        cursor.execute("SELECT COUNT(*) FROM files WHERE checksum IS NOT NULL")
         stats['total_files'] = cursor.fetchone()[0]

         # Unique files
-        cursor.execute("SELECT COUNT(DISTINCT checksum) FROM files_bak WHERE checksum IS NOT NULL")
+        cursor.execute("SELECT COUNT(DISTINCT checksum) FROM files WHERE checksum IS NOT NULL")
         stats['unique_files'] = cursor.fetchone()[0]

         # Duplicate files
         stats['duplicate_files'] = stats['total_files'] - stats['unique_files']

         # Total size
-        cursor.execute("SELECT COALESCE(SUM(size), 0) FROM files_bak WHERE checksum IS NOT NULL")
+        cursor.execute("SELECT COALESCE(SUM(size), 0) FROM files WHERE checksum IS NOT NULL")
         stats['total_size'] = cursor.fetchone()[0]

         # Unique size
@@ -281,10 +281,10 @@ class DeduplicationEngine:
             SELECT COALESCE(SUM(size), 0)
             FROM (
                 SELECT DISTINCT ON (checksum) size
-                FROM files_bak
+                FROM files
                 WHERE checksum IS NOT NULL
             ) AS unique_files
         """)
         stats['unique_size'] = cursor.fetchone()[0]

         # Wasted space
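`DISTINCT ON (checksum)` is Postgres-specific: the subquery keeps exactly one row per checksum, so each unique blob's size is summed once. The wasted-space figure the comment introduces then follows from the keys already computed — a one-line sketch, assuming only the `stats` dict above:

```python
# Bytes reclaimable if every duplicate group kept a single copy.
stats['wasted_space'] = stats['total_size'] - stats['unique_size']
```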
@@ -321,14 +321,14 @@ class DeduplicationEngine:
         cursor.execute("""
             WITH canonical AS (
                 SELECT DISTINCT ON (checksum) path, checksum
-                FROM files_bak
+                FROM files
                 WHERE checksum IS NOT NULL
                 ORDER BY checksum, path
             )
-            UPDATE files_bak
+            UPDATE files
             SET duplicate_of = NULL
             WHERE path IN (SELECT path FROM canonical)
         """)

         count = cursor.rowcount
         conn.commit()
@@ -227,7 +227,7 @@ class HashStore:
         # Get all files with their hashes
         cursor.execute("""
             SELECT f.path, f.checksum
-            FROM files_bak f
+            FROM files f
             WHERE f.checksum IS NOT NULL
         """)
@@ -60,7 +60,7 @@ class DiscoveryEngine:
                 size BIGINT NOT NULL,
                 modified_time DOUBLE PRECISION NOT NULL,
                 created_time DOUBLE PRECISION NOT NULL,
-                disk TEXT NOT NULL,
+                disk_label TEXT NOT NULL,
                 checksum TEXT,
                 status TEXT DEFAULT 'indexed',
                 category TEXT,
@@ -72,18 +72,18 @@ class DiscoveryEngine:

         # Create index on path
         cursor.execute("""
-            CREATE INDEX IF NOT EXISTS idx_files_path ON files_bak(path)
+            CREATE INDEX IF NOT EXISTS idx_files_path ON files(path)
         """)

         # Create index on disk
         cursor.execute("""
-            CREATE INDEX IF NOT EXISTS idx_files_disk ON files_bak(disk)
+            CREATE INDEX IF NOT EXISTS idx_files_disk ON files(disk_label)
         """)

         # Create index on checksum
         cursor.execute("""
-            CREATE INDEX IF NOT EXISTS idx_files_checksum ON files_bak(checksum)
+            CREATE INDEX IF NOT EXISTS idx_files_checksum ON files(checksum)
         """)

         conn.commit()
         cursor.close()
@@ -136,7 +136,7 @@ class DiscoveryEngine:
                 size=file_meta.size,
                 modified_time=file_meta.modified_time,
                 created_time=file_meta.created_time,
-                disk=disk
+                disk_label=disk
             )

             batch.append(record)
@@ -193,7 +193,7 @@ class DiscoveryEngine:
             batch: List of FileRecord objects
         """
         query = """
-            INSERT INTO files_bak (path, size, modified_time, created_time, disk, checksum, status, category, duplicate_of)
+            INSERT INTO files (path, size, modified_time, created_time, disk_label, checksum, status, category, duplicate_of)
             VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
             ON CONFLICT (path) DO UPDATE SET
                 size = EXCLUDED.size,
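`ON CONFLICT (path) DO UPDATE` makes re-indexing idempotent: scanning the same path twice updates the existing row instead of violating the unique constraint. A minimal standalone sketch — columns follow the DDL above, the sample values and connection are invented:

```python
import psycopg2

conn = psycopg2.connect(dbname="disk_reorganizer_db")  # connection assumed
with conn, conn.cursor() as cursor:
    cursor.execute("""
        INSERT INTO files (path, size, modified_time, created_time, disk_label,
                           checksum, status, category, duplicate_of)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
        ON CONFLICT (path) DO UPDATE SET
            size = EXCLUDED.size
    """, ('/data/report.pdf', 4096, 1700000000.0, 1690000000.0, 'disk1',
          None, 'indexed', None, None))
    # Running the same INSERT again updates the row in place; EXCLUDED
    # refers to the values the conflicting INSERT attempted to write.
```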
@@ -207,7 +207,7 @@ class DiscoveryEngine:
             record.size,
             record.modified_time,
             record.created_time,
-            record.disk,
+            record.disk_label,
             record.checksum,
             record.status,
             record.category,
@@ -276,9 +276,9 @@ class DiscoveryEngine:
         cursor = conn.cursor()

         if disk:
-            cursor.execute("SELECT COUNT(*) FROM files_bak WHERE disk = %s", (disk,))
+            cursor.execute("SELECT COUNT(*) FROM files WHERE disk_label = %s", (disk,))
         else:
-            cursor.execute("SELECT COUNT(*) FROM files_bak")
+            cursor.execute("SELECT COUNT(*) FROM files")

         count = cursor.fetchone()[0]
         cursor.close()
@@ -298,9 +298,9 @@ class DiscoveryEngine:
         cursor = conn.cursor()

         if disk:
-            cursor.execute("SELECT COALESCE(SUM(size), 0) FROM files_bak WHERE disk = %s", (disk,))
+            cursor.execute("SELECT COALESCE(SUM(size), 0) FROM files WHERE disk_label = %s", (disk,))
         else:
-            cursor.execute("SELECT COALESCE(SUM(size), 0) FROM files_bak")
+            cursor.execute("SELECT COALESCE(SUM(size), 0) FROM files")

         total = cursor.fetchone()[0]
         cursor.close()
app/main.py
@@ -37,7 +37,7 @@ class FileRecord:
     path: str
     size: int
     modified_time: float
-    disk: str
+    disk_label: str
     checksum: Optional[str] = None
     status: str = 'indexed'  # indexed, planned, moved, verified
@@ -49,11 +49,11 @@ class DiskReorganizer:
         """
         if db_config is None:
             db_config = {
-                'host': '192.168.1.159',
-                'port': 5432,
-                'database': 'disk_reorganizer_db',
-                'user': 'disk_reorg_user',
-                'password': 'heel-goed-wachtwoord'
+                'host': os.getenv('DB_HOST', 'localhost'),
+                'port': int(os.getenv('DB_PORT', 5432)),
+                'database': os.getenv('DB_NAME', 'disk_reorganizer_db'),
+                'user': os.getenv('DB_USER', 'disk_reorg_user'),
+                'password': os.getenv('DB_PASSWORD', 'heel-goed-wachtwoord')
             }
         self.db_config = db_config
         self.init_database()
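This hunk replaces hardcoded credentials with environment-driven defaults (note the old password still ships as the fallback). A sketch of how a deployment might supply the variables before constructing the class — the variable names come from the hunk, everything else is assumed:

```python
import os

# Assumed deployment values; in practice these come from the process
# environment (systemd unit, container env, CI secrets), not from code.
os.environ['DB_HOST'] = 'db.internal'
os.environ['DB_PORT'] = '5433'
os.environ['DB_PASSWORD'] = 'use-a-real-secret'

config = {
    'host': os.getenv('DB_HOST', 'localhost'),
    'port': int(os.getenv('DB_PORT', 5432)),
    'database': os.getenv('DB_NAME', 'disk_reorganizer_db'),
    'user': os.getenv('DB_USER', 'disk_reorg_user'),
    'password': os.getenv('DB_PASSWORD', ''),  # empty fallback chosen here
}
```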
@@ -127,12 +127,12 @@ class DiskReorganizer:

         # PostgreSQL INSERT ... ON CONFLICT for upsert
         cursor.execute("""
-            INSERT INTO files_bak (path, size, modified_time, disk, checksum, status)
+            INSERT INTO files (path, size, modified_time, disk_label, checksum, status)
             VALUES (%s, %s, %s, %s, %s, %s)
             ON CONFLICT (path) DO UPDATE SET
                 size = EXCLUDED.size,
                 modified_time = EXCLUDED.modified_time,
-                disk = EXCLUDED.disk,
+                disk_label = EXCLUDED.disk_label,
                 status = EXCLUDED.status
         """, (rel_path, size, mtime, disk_name, None, 'indexed'))
@@ -174,9 +174,9 @@ class DiskReorganizer:

         try:
             cursor.execute("""
-                SELECT disk, SUM(size) as total_size, COUNT(*) as file_count
-                FROM files_bak
-                GROUP BY disk
+                SELECT disk_label, SUM(size) as total_size, COUNT(*) as file_count
+                FROM files
+                GROUP BY disk_label
             """)

             usage = {}
@@ -215,7 +215,7 @@ class DiskReorganizer:
         cursor = conn.cursor()

         cursor.execute(
-            "SELECT path, size, modified_time FROM files_bak WHERE disk = %s ORDER BY size DESC",
+            "SELECT path, size, modified_time FROM files WHERE disk_label = %s ORDER BY size DESC",
             (target_disk,)
         )
         files_to_move = cursor.fetchall()
@@ -265,15 +265,15 @@ class DiskReorganizer:
                 'source_disk': target_disk,
                 'source_path': rel_path,
                 'dest_disk': dest_disk,
-                'dest_path': rel_path,  # Keep same relative path
+                'target_path': rel_path,  # Keep same relative path
                 'size': size
             }
             plan['operations'].append(op)

             # Store in database
             cursor.execute(
-                "INSERT INTO operations_bak (source_path, dest_path, operation_type) VALUES (%s, %s, %s)",
-                (f"{target_disk}:{rel_path}", f"{dest_disk}:{rel_path}", 'move')
+                "INSERT INTO operations (source_path, target_path, operation_type, status) VALUES (%s, %s, %s, %s)",
+                (f"{target_disk}:{rel_path}", f"{dest_disk}:{rel_path}", 'move', 'pending')
             )

         conn.commit()
@@ -347,10 +347,10 @@ class DiskReorganizer:
             source_disk = op['source_disk']
             source_path = op['source_path']
             dest_disk = op['dest_disk']
-            dest_path = op['dest_path']
+            target_path = op['target_path']

             source_full = Path(source_disk) / source_path
-            dest_full = Path(dest_disk) / dest_path
+            dest_full = Path(dest_disk) / target_path

             # Dynamic progress display
             elapsed = time.time() - start_time
@@ -384,7 +384,7 @@ class DiskReorganizer:
                 if self.verify_operation(source_full, dest_full):
                     # Update database
                     cursor.execute(
-                        "UPDATE files_bak SET disk = %s, status = 'moved' WHERE path = %s AND disk = %s",
+                        "UPDATE files SET disk_label = %s, status = 'moved' WHERE path = %s AND disk_label = %s",
                         (dest_disk, source_path, source_disk)
                     )
@@ -393,7 +393,7 @@ class DiskReorganizer:

                     # Log operation as executed
                     cursor.execute(
-                        "UPDATE operations_bak SET executed = 1, executed_at = CURRENT_TIMESTAMP WHERE source_path = %s",
+                        "UPDATE operations SET executed = 1, executed_at = CURRENT_TIMESTAMP WHERE source_path = %s",
                         (f"{source_disk}:{source_path}",)
                     )
@@ -407,7 +407,7 @@ class DiskReorganizer:
             except Exception as e:
                 logger.error(f"\n  Error processing {source_path}: {e}")
                 cursor.execute(
-                    "UPDATE operations_bak SET error = %s WHERE source_path = %s",
+                    "UPDATE operations SET error = %s WHERE source_path = %s",
                     (str(e), f"{source_disk}:{source_path}")
                 )
                 error_count += 1
@@ -436,7 +436,7 @@ class DiskReorganizer:

         try:
             cursor.execute("""
-                SELECT status, COUNT(*), SUM(size) FROM files_bak GROUP BY status
+                SELECT status, COUNT(*), SUM(size) FROM files GROUP BY status
             """)

             print("\n=== FILE MIGRATION REPORT ===")
@@ -445,7 +445,7 @@ class DiskReorganizer:
                 print(f"{status:15}: {count:6} files, {self.format_size(size or 0)}")

             cursor.execute("""
-                SELECT operation_type, executed, verified, COUNT(*) FROM operations_bak GROUP BY operation_type, executed, verified
+                SELECT operation_type, executed, verified, COUNT(*) FROM operations GROUP BY operation_type, executed, verified
             """)

             print("\n=== OPERATIONS REPORT ===")
@@ -63,7 +63,7 @@ class MigrationEngine:
             CREATE TABLE IF NOT EXISTS operations (
                 id SERIAL PRIMARY KEY,
                 source_path TEXT NOT NULL,
-                dest_path TEXT NOT NULL,
+                target_path TEXT NOT NULL,
                 operation_type TEXT NOT NULL,
                 size BIGINT DEFAULT 0,
                 status TEXT DEFAULT 'pending',
@@ -77,7 +77,7 @@ class MigrationEngine:
         # Create index on status
         cursor.execute("""
             CREATE INDEX IF NOT EXISTS idx_operations_status
-            ON operations_bak(status)
+            ON operations(status)
         """)

         conn.commit()
@@ -107,7 +107,7 @@ class MigrationEngine:
         params = []

         if disk:
-            conditions.append("disk = %s")
+            conditions.append("disk_label = %s")
             params.append(disk)

         if category:
@@ -116,7 +116,7 @@ class MigrationEngine:

         query = f"""
             SELECT path, size, category, duplicate_of
-            FROM files_bak
+            FROM files
             WHERE {' AND '.join(conditions)}
             ORDER BY category, path
         """
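The `conditions`/`params` pair keeps the dynamically assembled WHERE clause parameterized: only fixed column fragments are interpolated into the f-string, while values travel separately as `%s` parameters. A self-contained sketch of the same pattern — the function and argument names are assumed:

```python
import psycopg2

def fetch_files(conn, disk: str | None = None, category: str | None = None):
    """Assemble WHERE fragments from fixed strings; user values stay as params."""
    conditions, params = ["1=1"], []
    if disk:
        conditions.append("disk_label = %s")
        params.append(disk)
    if category:
        conditions.append("category = %s")
        params.append(category)
    query = f"""
        SELECT path, size, category, duplicate_of
        FROM files
        WHERE {' AND '.join(conditions)}
        ORDER BY category, path
    """
    with conn.cursor() as cursor:
        cursor.execute(query, params)  # values bound server-side, no injection
        return cursor.fetchall()
```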
@@ -133,7 +133,7 @@ class MigrationEngine:
             source = Path(path_str)

             # Determine destination
-            dest_path = self.target_base / file_category / source.name
+            target_path = self.target_base / file_category / source.name

             # Determine operation type
             if duplicate_of:
@@ -145,7 +145,7 @@ class MigrationEngine:

             operation = OperationRecord(
                 source_path=source,
-                dest_path=dest_path,
+                target_path=target_path,
                 operation_type=operation_type,
                 size=size
             )
@@ -200,7 +200,7 @@ class MigrationEngine:
             # In dry run, just log what would happen
             self.logger.debug(
                 f"[DRY RUN] Would {operation.operation_type}: "
-                f"{operation.source_path} -> {operation.dest_path}"
+                f"{operation.source_path} -> {operation.target_path}"
             )
             stats.files_succeeded += 1
         else:
@@ -261,7 +261,7 @@ class MigrationEngine:
             # Execute migration
             success = strategy.migrate(
                 operation.source_path,
-                operation.dest_path,
+                operation.target_path,
                 verify=self.processing_config.verify_operations
             )
@@ -293,14 +293,14 @@ class MigrationEngine:
         cursor = conn.cursor()

         cursor.execute("""
-            INSERT INTO operations_bak (
-                source_path, dest_path, operation_type, size,
+            INSERT INTO operations (
+                source_path, target_path, operation_type, bytes_processed,
                 status, error, executed_at, verified
             )
             VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
         """, (
             str(operation.source_path),
-            str(operation.dest_path),
+            str(operation.target_path),
             operation.operation_type,
             operation.size,
             operation.status,
@@ -321,22 +321,22 @@ class MigrationEngine:
         Returns:
             True if rollback successful
         """
-        self.logger.warning(f"Rolling back: {operation.dest_path}")
+        self.logger.warning(f"Rolling back: {operation.target_path}")

         try:
             # Remove destination
-            if operation.dest_path.exists():
-                operation.dest_path.unlink()
+            if operation.target_path.exists():
+                operation.target_path.unlink()

             # Update database
             conn = self._get_connection()
             cursor = conn.cursor()

             cursor.execute("""
-                UPDATE operations_bak
+                UPDATE operations
                 SET status = 'rolled_back'
-                WHERE source_path = %s AND dest_path = %s
-            """, (str(operation.source_path), str(operation.dest_path)))
+                WHERE source_path = %s AND target_path = %s
+            """, (str(operation.source_path), str(operation.target_path)))

             conn.commit()
             cursor.close()
@@ -344,7 +344,7 @@ class MigrationEngine:
             return True

         except Exception as e:
-            self.logger.error(f"Rollback failed: {operation.dest_path}: {e}")
+            self.logger.error(f"Rollback failed: {operation.target_path}: {e}")
             return False

     def get_migration_stats(self) -> dict:
@@ -359,13 +359,13 @@ class MigrationEngine:
         stats = {}

         # Total operations
-        cursor.execute("SELECT COUNT(*) FROM operations_bak")
+        cursor.execute("SELECT COUNT(*) FROM operations")
         stats['total_operations'] = cursor.fetchone()[0]

         # Operations by status
         cursor.execute("""
             SELECT status, COUNT(*)
-            FROM operations_bak
+            FROM operations
             GROUP BY status
         """)
@@ -375,7 +375,7 @@ class MigrationEngine:
         # Total size migrated
         cursor.execute("""
             SELECT COALESCE(SUM(size), 0)
-            FROM operations_bak
+            FROM operations
             WHERE status = 'completed'
         """)
         stats['total_size_migrated'] = cursor.fetchone()[0]
@@ -396,8 +396,8 @@ class MigrationEngine:
         cursor = conn.cursor()

         cursor.execute("""
-            SELECT source_path, dest_path, operation_type
-            FROM operations_bak
+            SELECT source_path, target_path, operation_type
+            FROM operations
             WHERE status = 'completed' AND verified = FALSE
         """)
@@ -12,7 +12,7 @@ class FileRecord:
     size: int
     modified_time: float
     created_time: float
-    disk: str
+    disk_label: str
     checksum: str | None = None
     status: str = 'indexed'  # indexed, planned, moved, verified
     category: str | None = None
@@ -23,7 +23,7 @@ class FileRecord:
 class OperationRecord:
     """Record of a migration operation"""
     source_path: Path
-    dest_path: Path
+    target_path: Path
     operation_type: str  # move, copy, hardlink, symlink
     status: str = 'pending'  # pending, in_progress, completed, failed
     error: str | None = None
@@ -12,7 +12,7 @@ class FileRecord:
     size: int
     modified_time: float
     created_time: float
-    disk: str
+    disk_label: str
     checksum: Optional[str] = None
     status: str = 'indexed'  # indexed, planned, moved, verified
     category: Optional[str] = None
@@ -25,7 +25,7 @@ class FileRecord:
             'size': self.size,
             'modified_time': self.modified_time,
             'created_time': self.created_time,
-            'disk': self.disk,
+            'disk_label': self.disk_label,
             'checksum': self.checksum,
             'status': self.status,
             'category': self.category,
@@ -37,7 +37,7 @@ class FileRecord:
 class OperationRecord:
     """Record of a migration operation"""
     source_path: Path
-    dest_path: Path
+    target_path: Path
     operation_type: str  # move, copy, hardlink, symlink
     size: int = 0
     status: str = 'pending'  # pending, in_progress, completed, failed
@@ -49,7 +49,7 @@ class OperationRecord:
         """Convert to dictionary for serialization"""
         return {
             'source_path': str(self.source_path),
-            'dest_path': str(self.dest_path),
+            'target_path': str(self.target_path),
             'operation_type': self.operation_type,
             'size': self.size,
             'status': self.status,
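Pulling the renamed fields together, the post-rename record plausibly reads as the following single dataclass — a reconstruction from the fragments above, not the file's verbatim contents:

```python
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

@dataclass
class OperationRecord:
    """Record of a migration operation (reconstructed from the hunks above)."""
    source_path: Path
    target_path: Path          # renamed from dest_path in this commit
    operation_type: str        # move, copy, hardlink, symlink
    size: int = 0
    status: str = 'pending'    # pending, in_progress, completed, failed
    error: Optional[str] = None

    def to_dict(self) -> dict:
        """Convert to dictionary for serialization."""
        return {
            'source_path': str(self.source_path),
            'target_path': str(self.target_path),
            'operation_type': self.operation_type,
            'size': self.size,
            'status': self.status,
            'error': self.error,
        }
```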