diff --git a/src/backend/file_server/build.gradle b/src/backend/file_server/build.gradle
index d81ed064..e5c3cf1e 100644
--- a/src/backend/file_server/build.gradle
+++ b/src/backend/file_server/build.gradle
@@ -25,24 +25,30 @@ repositories {
 dependencies {
     implementation 'org.springframework.boot:spring-boot-starter-web'
+    implementation 'org.springframework.boot:spring-boot-starter-data-jpa'
     compileOnly 'org.projectlombok:lombok'
-    runtimeOnly 'org.postgresql:postgresql'
     annotationProcessor 'org.projectlombok:lombok'
+    runtimeOnly 'org.postgresql:postgresql'
     testImplementation 'org.springframework.boot:spring-boot-starter-test'
     testRuntimeOnly 'org.junit.platform:junit-platform-launcher'
-    implementation(platform("software.amazon.awssdk:bom:2.27.21"))
-    implementation 'software.amazon.awssdk:s3'
-    implementation 'org.apache.tika:tika-core:2.8.0'
-
-    implementation 'org.springframework.boot:spring-boot-starter-data-jpa'
     implementation 'commons-fileupload:commons-fileupload:1.4'
 
     //common-module
     implementation project(":common_module")
+
+    // AWS SDK v2
+    implementation platform('software.amazon.awssdk:bom:2.17.106')
+    implementation 'software.amazon.awssdk:s3'
+    implementation 'software.amazon.awssdk:sts'
+    implementation 'software.amazon.awssdk:netty-nio-client'
+
+    // Spring AOP (for async processing)
+    implementation 'org.springframework.boot:spring-boot-starter-aop'
 }
+
 tasks.named('test') {
     useJUnitPlatform()
 }
diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/config/AsyncConfig.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/config/AsyncConfig.java
new file mode 100644
index 00000000..60756e08
--- /dev/null
+++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/config/AsyncConfig.java
@@ -0,0 +1,43 @@
+package com.jootalkpia.file_server.config;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.scheduling.annotation.EnableAsync;
+import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
+import java.util.concurrent.Executor;
+
+@Configuration
+@EnableAsync
+public class AsyncConfig {
+
+    @Bean(name = "fileUploadExecutor")
+    public Executor fileUploadExecutor() {
+        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
+        executor.setCorePoolSize(10);
+        executor.setMaxPoolSize(50);
+        executor.setQueueCapacity(100);
+        executor.setThreadNamePrefix("File-Upload-Executor-");
+        executor.setKeepAliveSeconds(60);
+        executor.setAllowCoreThreadTimeOut(true);
+        executor.initialize();
+        return executor;
+    }
+
+    // Dedicated thread pool for chunk-merge work
+    @Bean(name = "fileMergeExecutor")
+    public Executor fileMergeExecutor() {
+        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
+        executor.setCorePoolSize(5);   // merging needs relatively few threads
+        executor.setMaxPoolSize(10);   // handle at most 10 merge tasks concurrently
+        executor.setQueueCapacity(50); // queue for pending merge tasks
+        executor.setThreadNamePrefix("File-Merge-Executor-");
+        executor.setKeepAliveSeconds(30);
+        executor.setAllowCoreThreadTimeOut(true);
+
+        // Raise merge thread priority (Thread.MAX_PRIORITY = 10 is the highest)
+        executor.setThreadPriority(Thread.MAX_PRIORITY);
+
+        executor.initialize();
+        return executor;
+    }
+}
diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/config/S3Config.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/config/S3Config.java
index e0e9bb19..456fe240 100644
--- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/config/S3Config.java
+++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/config/S3Config.java
@@ -5,8 +5,17 @@
 import org.springframework.context.annotation.Configuration;
 import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
 import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
+import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
 import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
 import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3Configuration;
+import software.amazon.awssdk.core.retry.RetryPolicy;
+import software.amazon.awssdk.core.retry.backoff.FullJitterBackoffStrategy;
+
+import java.net.URI;
+import java.time.Duration;
 
 @Configuration
 public class S3Config {
@@ -20,13 +29,61 @@ public class S3Config {
     @Value("${spring.cloud.aws.credentials.secret-key}")
     private String secretKey;
 
+    @Value("${spring.cloud.aws.s3.bucket}")
+    private String bucketName;
+
+    /**
+     * Asynchronous S3AsyncClient configuration
+     */
+    @Bean
+    public S3AsyncClient s3AsyncClient() {
+        // NettyNioAsyncHttpClient settings
+        SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder()
+                .maxConcurrency(100)                                  // cap on concurrent connections
+                .maxPendingConnectionAcquires(5000)                   // cap on queued connection acquisitions
+                .connectionAcquisitionTimeout(Duration.ofSeconds(60)) // allow longer wait to acquire a connection
+                .connectionTimeout(Duration.ofSeconds(30))            // connect timeout
+                .readTimeout(Duration.ofSeconds(60))                  // read timeout
+                .writeTimeout(Duration.ofSeconds(60))                 // write timeout
+                .build();
+
+        // Retry policy (full-jitter backoff)
+        RetryPolicy retryPolicy = RetryPolicy.builder()
+                .backoffStrategy(FullJitterBackoffStrategy.builder()
+                        .baseDelay(Duration.ofMillis(500))      // minimum delay
+                        .maxBackoffTime(Duration.ofSeconds(10)) // maximum delay
+                        .build())
+                .numRetries(3) // maximum retry attempts
+                .build();
+
+        // Build the S3AsyncClient
+        return S3AsyncClient.builder()
+                .region(Region.of(region))
+                .endpointOverride(URI.create("https://s3." + region + ".amazonaws.com")) // S3 endpoint
+                .credentialsProvider(StaticCredentialsProvider.create(
+                        AwsBasicCredentials.create(accessKey, secretKey)
+                ))
+                .httpClient(httpClient)
+                .overrideConfiguration(c -> c.retryPolicy(retryPolicy)) // apply retry policy
+                .build();
+    }
+
+    /**
+     * Synchronous S3Client configuration
+     */
     @Bean
     public S3Client s3Client() {
         return S3Client.builder()
                 .region(Region.of(region))
+                .endpointOverride(URI.create("https://s3."
+ region + ".amazonaws.com")) // S3 엔드포인트 .credentialsProvider(StaticCredentialsProvider.create( AwsBasicCredentials.create(accessKey, secretKey) )) + .serviceConfiguration(S3Configuration.builder() + .checksumValidationEnabled(false) // Checksum 검증 비활성화 (선택) + .build() + ) .build(); } } + diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/controller/FileController.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/controller/FileController.java index 2b5b82e4..b07369aa 100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/controller/FileController.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/controller/FileController.java @@ -12,7 +12,12 @@ import com.jootalkpia.file_server.utils.ValidationUtils; import com.jootalkpia.passport.anotation.CurrentUser; import com.jootalkpia.passport.component.UserInfo; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.core.io.InputStreamResource; @@ -23,6 +28,7 @@ import org.springframework.web.bind.annotation.ModelAttribute; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RequestPart; @@ -45,12 +51,34 @@ public ResponseEntity testEndpoint() { return ResponseEntity.ok("Test successful"); } + // 시간 측정을 위한 Map 선언 + public static final Map UPLOAD_TIME_TRACKER = new ConcurrentHashMap<>(); + public static final List RESPONSE_TIMES = Collections.synchronizedList(new ArrayList<>()); + @GetMapping("/init-upload/{tempFileIdentifier}") - public ResponseEntity> initFileUpload(@PathVariable String tempFileIdentifier) { + public ResponseEntity> initFileUpload(@PathVariable String tempFileIdentifier, @RequestParam String mimeType) { log.info("init-upload 요청 받음: {}", tempFileIdentifier); - return ResponseEntity.ok(Map.of("code", 200, "status", "complete")); + + // ✅ 요청 시작 시간 기록 + long startTime = System.currentTimeMillis(); + log.info("✅ 요청 시작 시간 (Unix Timestamp): {}, tempFileIdentifier: {}", startTime / 1000, tempFileIdentifier); + log.info("✅ 요청 시작 시간 (UTC): {}", java.time.Instant.now()); + + // 초기화 처리 + Long fileId = fileService.initiateMultipartUpload(tempFileIdentifier, mimeType); + + // ✅ UPLOAD_TIME_TRACKER에 시작 시간 저장 + UPLOAD_TIME_TRACKER.put(fileId, startTime); + + // 초기화 완료 응답 + Map response = new HashMap<>(); + response.put("fileId", fileId); + response.put("status", "initialized"); + return ResponseEntity.ok(response); } + + // @DeleteMapping("/fileId") // public ResponseEntity deleteFile(@PathVariable Long fileId) { // log.info("got deleteFile id: {}", fileId); @@ -72,10 +100,10 @@ public ResponseEntity> uploadThumbnail(@RequestParam Long fi public ResponseEntity uploadFileChunk( @RequestParam("workspaceId") Long workspaceId, @RequestParam("channelId") Long channelId, - @RequestParam("tempFileIdentifier") String tempFileIdentifier, + @RequestParam("fileId") Long fileId, @RequestParam("totalChunks") Long totalChunks, - @RequestParam("totalSize") Long totalSize, @RequestParam("chunkIndex") Long chunkIndex, + @RequestParam("mimeType") 
String mimeType, @RequestPart("chunk") MultipartFile chunk) { log.info("청크 업로드 요청: chunkIndex={}, totalChunks={}", chunkIndex, totalChunks); @@ -83,19 +111,20 @@ public ResponseEntity uploadFileChunk( ValidationUtils.validateWorkSpaceId(workspaceId); ValidationUtils.validateChannelId(channelId); ValidationUtils.validateFile(chunk); - ValidationUtils.validateFileId(tempFileIdentifier); +// ValidationUtils.validateFileId(fileId); ValidationUtils.validateTotalChunksAndChunkIndex(totalChunks, chunkIndex); // DTO로 변환 MultipartChunk multipartChunk = new MultipartChunk(chunkIndex, chunk); UploadChunkRequestDto request = new UploadChunkRequestDto( - workspaceId, channelId, tempFileIdentifier, totalChunks, totalSize, multipartChunk + workspaceId, channelId, fileId, totalChunks, mimeType, multipartChunk ); - Object response = fileService.uploadFileChunk(request); + Object response = fileService.uploadChunk(request); return ResponseEntity.ok(response); } + @PostMapping("/small") public ResponseEntity uploadFile(@ModelAttribute UploadFileRequestDto uploadFileRequest) { log.info("got uploadFileRequest: {}", uploadFileRequest.getWorkspaceId()); @@ -107,48 +136,32 @@ public ResponseEntity uploadFile(@ModelAttribute UploadFi return ResponseEntity.ok(response); } - - @PostMapping - public ResponseEntity uploadFiles(@ModelAttribute UploadFilesRequestDto uploadFileRequest) { - log.info("got uploadFileRequest: {}", uploadFileRequest); - ValidationUtils.validateLengthOfFilesAndThumbnails(uploadFileRequest.getFiles().length, uploadFileRequest.getThumbnails().length); - ValidationUtils.validateWorkSpaceId(uploadFileRequest.getWorkspaceId()); - ValidationUtils.validateChannelId(uploadFileRequest.getChannelId()); - ValidationUtils.validateFiles(uploadFileRequest.getFiles()); - ValidationUtils.validateFiles(uploadFileRequest.getThumbnails()); - - Long userId = 1L; - - log.info("got uploadFileRequest: {}", uploadFileRequest.getFiles().length); - UploadFilesResponseDto response = fileService.uploadFiles(userId, uploadFileRequest); - return ResponseEntity.ok(response); - } - - @GetMapping("/{fileId}") - public ResponseEntity downloadFile(@PathVariable Long fileId) { - log.info("got downloadFile id: {}", fileId); - ValidationUtils.validateFileId(fileId); - - ResponseInputStream s3InputStream = fileService.downloadFile(fileId); - - // response 생성 - long contentLength = s3InputStream.response().contentLength(); - - // Content-Type 가져오기 기 본값: application/octet-stream - String contentType = s3InputStream.response().contentType() != null - ? s3InputStream.response().contentType() - : MediaType.APPLICATION_OCTET_STREAM_VALUE; - - // 헤더 설정 - HttpHeaders headers = new HttpHeaders(); - headers.setContentType(MediaType.parseMediaType(contentType)); - headers.setContentLength(contentLength); - headers.setContentDispositionFormData("attachment", "file-" + fileId); - - return ResponseEntity.ok() - .headers(headers) - .body(new InputStreamResource(s3InputStream)); - } + // TODO: url 제공으로 수정 +// @GetMapping("/{fileId}") +// public ResponseEntity downloadFile(@PathVariable Long fileId) { +// log.info("got downloadFile id: {}", fileId); +// ValidationUtils.validateFileId(fileId); +// +// ResponseInputStream s3InputStream = fileService.downloadFile(fileId); +// +// // response 생성 +// long contentLength = s3InputStream.response().contentLength(); +// +// // Content-Type 가져오기 기본값: application/octet-stream +// String contentType = s3InputStream.response().contentType() != null +// ? 
s3InputStream.response().contentType() +// : MediaType.APPLICATION_OCTET_STREAM_VALUE; +// +// // 헤더 설정 +// HttpHeaders headers = new HttpHeaders(); +// headers.setContentType(MediaType.parseMediaType(contentType)); +// headers.setContentLength(contentLength); +// headers.setContentDispositionFormData("attachment", "file-" + fileId); +// +// return ResponseEntity.ok() +// .headers(headers) +// .body(new InputStreamResource(s3InputStream)); +// } @PostMapping("/profile-image") public ResponseEntity changeProfile( diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/dto/UploadChunkRequestDto.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/dto/UploadChunkRequestDto.java index 541bf699..6112d0e8 100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/dto/UploadChunkRequestDto.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/dto/UploadChunkRequestDto.java @@ -11,8 +11,8 @@ public class UploadChunkRequestDto { private Long workspaceId; private Long channelId; - private String tempFileIdentifier; + private Long fileId; private Long totalChunks; - private Long chunkSize; + private String mimeType; private MultipartChunk chunkInfo; } \ No newline at end of file diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/entity/Files.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/entity/FilesEntity.java similarity index 82% rename from src/backend/file_server/src/main/java/com/jootalkpia/file_server/entity/Files.java rename to src/backend/file_server/src/main/java/com/jootalkpia/file_server/entity/FilesEntity.java index 704f88ac..4b7e04de 100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/entity/Files.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/entity/FilesEntity.java @@ -4,14 +4,15 @@ import jakarta.persistence.GeneratedValue; import jakarta.persistence.GenerationType; import jakarta.persistence.Id; -import java.time.LocalDateTime; +import jakarta.persistence.Table; import lombok.Getter; import lombok.Setter; @Entity @Getter @Setter -public class Files extends BaseTimeEntity{ +@Table(name = "Files") +public class FilesEntity extends BaseTimeEntity{ @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private Long fileId; diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/exception/common/ErrorCode.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/exception/common/ErrorCode.java index 3ce31146..d4c6ffea 100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/exception/common/ErrorCode.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/exception/common/ErrorCode.java @@ -33,6 +33,8 @@ public enum ErrorCode { CHUNK_PROCESSING_FAILED("F50008", "청크 처리 중 오류가 발생했습니다."), CHUNK_MERGING_FAILED("F50007", "청크 병합 중 오류가 발생했습니다."), MIMETYPE_DETECTION_FAILED("F50009", "mimetype 감지에 실패했습니다."), + CHUNK_INITIALIZE_FAILED("F50010", "chunk 업로드 초기화가 되어있지 않습니다."), + CONTENT_TYPE_SETTING_FAILED("F50011", "Content-Type 설정에 실패하였습니다. 
"), UNEXPECTED_ERROR("F50006", "예상치 못한 오류가 발생했습니다."); private final String code; diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/repository/FileRepository.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/repository/FileRepository.java index 6d5e903a..6de13146 100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/repository/FileRepository.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/repository/FileRepository.java @@ -1,10 +1,10 @@ package com.jootalkpia.file_server.repository; -import com.jootalkpia.file_server.entity.Files; +import com.jootalkpia.file_server.entity.FilesEntity; import org.springframework.data.jpa.repository.JpaRepository; import org.springframework.stereotype.Repository; @Repository -public interface FileRepository extends JpaRepository { +public interface FileRepository extends JpaRepository { } diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileService.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileService.java index 028013de..99d3fdb8 100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileService.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileService.java @@ -6,28 +6,32 @@ import com.jootalkpia.file_server.dto.UploadFilesRequestDto; import com.jootalkpia.file_server.dto.UploadFileResponseDto; import com.jootalkpia.file_server.dto.UploadFilesResponseDto; -import com.jootalkpia.file_server.entity.Files; +import com.jootalkpia.file_server.entity.FilesEntity; import com.jootalkpia.file_server.entity.User; import com.jootalkpia.file_server.exception.common.CustomException; import com.jootalkpia.file_server.exception.common.ErrorCode; import com.jootalkpia.file_server.repository.FileRepository; import com.jootalkpia.file_server.repository.UserRepository; -import jakarta.transaction.Transactional; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; -import java.io.InputStream; +import java.nio.file.Path; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executor; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Service; import org.springframework.web.multipart.MultipartFile; import software.amazon.awssdk.core.ResponseInputStream; +import org.springframework.transaction.annotation.Transactional; +import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.GetObjectResponse; @Service @@ -35,119 +39,124 @@ @RequiredArgsConstructor public class FileService { - private final FileRepository fileRepository; private final S3Service s3Service; - private final UserRepository userRepository; + private final FileRepository fileRepository; private final FileTypeDetector fileTypeDetector; + private final UserRepository userRepository; - // 청크 저장을 위한 Map (tempFileIdentifier 기준으로 리스트 저장) - private static final ConcurrentHashMap> TEMP_FILE_STORAGE = new ConcurrentHashMap<>(); + @Autowired + 
@Qualifier("fileMergeExecutor") + private Executor fileMergeExecutor; - @Transactional - public Object uploadFileChunk(UploadChunkRequestDto request) { - MultipartFile chunkFile = request.getChunkInfo().getChunk(); - String tempFileIdentifier = request.getTempFileIdentifier(); - int totalChunks = request.getTotalChunks().intValue(); - int chunkIndex = request.getChunkInfo().getChunkIndex().intValue(); - log.info("Processing chunk {} of {}", chunkIndex, totalChunks); + // tempIdentifier: uploadId + private static final ConcurrentHashMap UPLOAD_ID_STORAGE = new ConcurrentHashMap<>(); - try { - // 청크 저장 리스트 불러오고 없으면 생성 - List chunkList = TEMP_FILE_STORAGE.computeIfAbsent(tempFileIdentifier, k -> new ArrayList<>(totalChunks)); - - // 리스트 크기를 totalChunks 크기로 확장 - while (chunkList.size() < totalChunks) { - chunkList.add(null); - } + // ✅ CopyOnWriteArrayList 사용 +// uploadId: etag + private static final ConcurrentHashMap> PART_TAG_STORAGE = new ConcurrentHashMap<>(); - int adjustedIndex = chunkIndex - 1; + public Long initiateMultipartUpload(String tempFileIdentifier, String mimeType) { + FilesEntity filesEntity = new FilesEntity(); + filesEntity.setMimeType(mimeType); + filesEntity.setFileType(mimeType.startsWith("video/") ? "VIDEO" : "IMAGE"); + FilesEntity savedEntity = fileRepository.save(filesEntity); + Long fileId = savedEntity.getFileId(); - File tempChunkFile = File.createTempFile("chunk_" + chunkIndex, ".part"); - appendChunkToFile(tempChunkFile, chunkFile); - chunkList.set(adjustedIndex, tempChunkFile); + String uploadId = s3Service.initiateMultipartUpload(fileId, mimeType); + UPLOAD_ID_STORAGE.put(fileId, uploadId); + log.info("Upload ID 저장 - TempFileIdentifier: {}, Upload ID: {}", tempFileIdentifier, uploadId); - // 모든 청크가 수신 완료되었는지 확인 (전체 크기가 맞으면 병합) - if (chunkList.size() == totalChunks && chunkList.stream().allMatch(java.util.Objects::nonNull)) { - log.info("모든 청크가 도착함 - 병합 시작 (임시 파일 ID: {})", tempFileIdentifier); - return finalizeFileUpload(tempFileIdentifier, chunkList); - } - } catch (IOException e) { - throw new CustomException(ErrorCode.CHUNK_PROCESSING_FAILED.getCode(), ErrorCode.CHUNK_PROCESSING_FAILED.getMsg()); - } - - // 병합이 완료되지 않은 경우 기본 응답 반환 - Map response = new HashMap<>(); - response.put("code", 200); - response.put("status", "partial"); - return response; + return savedEntity.getFileId(); } - private UploadFileResponseDto finalizeFileUpload(String tempFileIdentifier, List chunkList) { - try { - File mergedFile = mergeChunks(chunkList, tempFileIdentifier); + public Object uploadChunk(UploadChunkRequestDto request) { + MultipartFile chunkFile = request.getChunkInfo().getChunk(); + Long fileId = request.getFileId(); + int totalChunks = request.getTotalChunks().intValue(); + int chunkIndex = request.getChunkInfo().getChunkIndex().intValue(); + String mimeType = request.getMimeType(); + log.info("{}", fileId); - Files filesEntity = new Files(); - fileRepository.save(filesEntity); - Long fileId = filesEntity.getFileId(); + log.info("uploadFileChunk request: {}", chunkIndex); + log.info("UPLOAD_ID_STORAGE 상태 - Size: {}, Keys: {}", UPLOAD_ID_STORAGE.size(), UPLOAD_ID_STORAGE.keySet()); + + String uploadId = UPLOAD_ID_STORAGE.get(fileId); + if (uploadId == null) { + log.error("uploadId 없음 - tempFileIdentifier: {}", fileId); + throw new CustomException(ErrorCode.CHUNK_INITIALIZE_FAILED.getCode(), ErrorCode.CHUNK_INITIALIZE_FAILED.getMsg()); + } - // S3 업로드 - String fileType = fileTypeDetector.detectFileTypeFromFile(mergedFile); - String s3Url = 
s3Service.uploadFileMultipart(mergedFile, fileType.toLowerCase() + "s/", fileId); + log.info("청크 업로드 중 - Upload ID: {}, Chunk Index: {}", uploadId, chunkIndex); - filesEntity.setUrl(s3Url); - filesEntity.setFileType(fileType); - filesEntity.setFileSize(mergedFile.length()); - filesEntity.setMimeType(fileTypeDetector.detectMimeType(mergedFile)); - fileRepository.save(filesEntity); + String s3Key = s3Service.makeKey(fileId, mimeType); + try { + CompletableFuture future = s3Service.asyncUploadPartToS3(fileId, uploadId, chunkIndex, chunkFile.getBytes(), s3Key); + + future.thenAccept(completedPart -> { + // ✅ CopyOnWriteArrayList를 사용하여 동기화 필요 없음 + PART_TAG_STORAGE.computeIfAbsent(uploadId, k -> new CopyOnWriteArrayList<>()).add(completedPart); + + // ✅ 마지막 청크 여부 확인 및 즉시 병합 + if (isLastChunk(totalChunks, uploadId)) { + // ✅ 병합 작업을 병합 전용 스레드 풀에서 즉시 실행 + CompletableFuture.runAsync(() -> { + s3Service.completeMultipartUpload(fileId, uploadId, PART_TAG_STORAGE.get(uploadId), mimeType); + log.info("모든 청크 업로드 완료 및 병합 완료: {}", uploadId); + + // ✅ time 로그, with fileId + log.info("✅ 파일 병합 완료 시간 (Unix Timestamp): {}, fileId: {}", System.currentTimeMillis() / 1000, fileId); + log.info("✅ 파일 병합 완료 시간 (UTC): {}", java.time.Instant.now()); + + // ✅ 상태 초기화 + UPLOAD_ID_STORAGE.remove(fileId); + PART_TAG_STORAGE.remove(uploadId); + }, fileMergeExecutor); + } + }); - // 임시 데이터 정리 - TEMP_FILE_STORAGE.remove(tempFileIdentifier); - chunkList.forEach(File::delete); + // 각 청크 업로드 완료 시 응답 + Map response = new HashMap<>(); + response.put("code", 200); + response.put("status", "partial"); + response.put("message", "청크 업로드 완료"); + return response; - return new UploadFileResponseDto("200", "complete", fileId, fileType); } catch (IOException e) { - throw new CustomException(ErrorCode.FILE_PROCESSING_FAILED.getCode(), ErrorCode.FILE_PROCESSING_FAILED.getMsg()); + throw new CustomException(ErrorCode.CHUNK_PROCESSING_FAILED.getCode(), ErrorCode.CHUNK_PROCESSING_FAILED.getMsg()); } } - // 청크를 임시 파일에 추가 - private void appendChunkToFile(File tempFile, MultipartFile chunkFile) throws IOException { - try (FileOutputStream fos = new FileOutputStream(tempFile); - InputStream inputStream = chunkFile.getInputStream()) { - byte[] buffer = new byte[8192]; - int bytesRead; - while ((bytesRead = inputStream.read(buffer)) != -1) { - fos.write(buffer, 0, bytesRead); - } + private boolean isLastChunk(int totalChunks, String uploadId) { + // ✅ CopyOnWriteArrayList 사용으로 동기화 필요 없음 + CopyOnWriteArrayList completedParts = PART_TAG_STORAGE.get(uploadId); + + // ✅ 업로드된 청크 개수와 totalChunks 비교 + if (completedParts != null && completedParts.size() == totalChunks) { + log.info("모든 청크가 S3에 업로드됨 - 업로드 ID: {}", uploadId); + return true; } + return false; } - // 청크 파일 병합 - private File mergeChunks(List chunkList, String tempFileIdentifier) throws IOException { - File mergedFile = File.createTempFile("merged_" + tempFileIdentifier, ".tmp"); - - try (FileOutputStream fos = new FileOutputStream(mergedFile)) { - for (File chunk : chunkList) { - try (InputStream inputStream = new FileInputStream(chunk)) { - byte[] buffer = new byte[8192]; - int bytesRead; - while ((bytesRead = inputStream.read(buffer)) != -1) { - fos.write(buffer, 0, bytesRead); - } - } - } - } catch (IOException e) { - throw new CustomException(ErrorCode.CHUNK_MERGING_FAILED.getCode(), ErrorCode.CHUNK_MERGING_FAILED.getMsg()); - } - return mergedFile; + public String defineFolderToUpload(String fileType) { + if ("VIDEO".equalsIgnoreCase(fileType)) { + return "videos"; + } else if 
("IMAGE".equalsIgnoreCase(fileType)) { + return "images"; + } else if ("THUMBNAIL".equalsIgnoreCase(fileType)) { + return "thumbnails"; + } else { + log.error("정의되지 않은 파일 타입: {}", fileType); + throw new CustomException(ErrorCode.UNSUPPORTED_FILE_TYPE.getCode(), "지원하지 않는 파일 타입"); + } } @Transactional public void uploadThumbnail(Long fileId, MultipartFile thumbnail) { log.info("upload thumbnail file id: {}", fileId); - Files fileEntity = fileRepository.findById(fileId) + FilesEntity fileEntity = fileRepository.findById(fileId) .orElseThrow(() -> new CustomException(ErrorCode.FILE_NOT_FOUND.getCode(), ErrorCode.FILE_NOT_FOUND.getMsg())); if (!"VIDEO".equals(fileEntity.getFileType())) { @@ -158,15 +167,24 @@ public void uploadThumbnail(Long fileId, MultipartFile thumbnail) { try { String suffix = fileTypeDetector.detectFileTypeFromMultipartFile(thumbnail); log.info("upload thumbnail file id: {}", fileId); - File tempFile = File.createTempFile("thumbnail_", suffix); + + // 임시 파일 생성 (Path -> File 변환) + Path tempPath = java.nio.file.Files.createTempFile("thumbnail_", suffix); + File tempFile = tempPath.toFile(); // Path -> File 변환 + + // MultipartFile -> File 전송 thumbnail.transferTo(tempFile); String folder = "thumbnails/"; String urlThumbnail = s3Service.uploadFileMultipart(tempFile, folder, fileId); fileEntity.setUrlThumbnail(urlThumbnail); + fileRepository.save(fileEntity); - tempFile.delete(); + // 임시 파일 삭제 + if (tempFile.exists()) { + tempFile.delete(); + } } catch (IOException e) { throw new CustomException(ErrorCode.FILE_PROCESSING_FAILED.getCode(), "Failed to process thumbnail file"); } @@ -178,7 +196,7 @@ public UploadFileResponseDto uploadFile(UploadFileRequestDto uploadFileRequestDt String fileType = fileTypeDetector.detectFileTypeFromMultipartFile(uploadFileRequestDto.getFile()); Long fileId = null; - Files filesEntity = new Files(); + FilesEntity filesEntity = new FilesEntity(); fileRepository.save(filesEntity); fileId = filesEntity.getFileId(); @@ -209,7 +227,7 @@ public UploadFilesResponseDto uploadFiles(Long userId, UploadFilesRequestDto upl for (int i = 0; i < files.length; i++) { Long fileId = null; - Files filesEntity = new Files(); + FilesEntity filesEntity = new FilesEntity(); fileRepository.save(filesEntity); fileId = filesEntity.getFileId(); @@ -247,7 +265,7 @@ private String uploadEachFile(String fileType, Long fileId, MultipartFile file) public ResponseInputStream downloadFile(Long fileId) { // 파일 조회 - Files fileEntity = fileRepository.findById(fileId) + FilesEntity fileEntity = fileRepository.findById(fileId) .orElseThrow(() -> new CustomException(ErrorCode.FILE_NOT_FOUND.getCode(), ErrorCode.FILE_NOT_FOUND.getMsg())); // 폴더 결정 @@ -257,17 +275,6 @@ public ResponseInputStream downloadFile(Long fileId) { return s3Service.downloadFile(folder, fileId); } - public String defineFolderToUpload(String fileType) { - if ("VIDEO".equalsIgnoreCase(fileType)) { - return "videos"; - } else if ("IMAGE".equalsIgnoreCase(fileType)) { - return "images"; - } else if ("THUMBNAIL".equalsIgnoreCase(fileType)) { - return "thumbnails"; - } else { - throw new CustomException(ErrorCode.FILE_PROCESSING_FAILED.getCode(), ErrorCode.FILE_PROCESSING_FAILED.getMsg()); - } - } @Transactional public ChangeProfileResponseDto changeProfile(Long userId, MultipartFile newImage) { diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileTypeDetector.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileTypeDetector.java index 9fbef313..be85daa6 
100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileTypeDetector.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/FileTypeDetector.java @@ -5,6 +5,8 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.net.URL; +import java.net.URLConnection; import lombok.extern.slf4j.Slf4j; import org.apache.tika.Tika; import org.springframework.stereotype.Service; @@ -32,7 +34,7 @@ public String detectFileTypeFromFile(File file) { } } - private String detectFileType(Object file) throws IOException { + public String detectFileType(Object file) throws IOException { String mimeType = detectMimeType(file); if (mimeType.startsWith("image/")) { @@ -46,17 +48,44 @@ private String detectFileType(Object file) throws IOException { public String detectMimeType(Object file) { try { + String mimeType; if (file instanceof InputStream) { - return tika.detect((InputStream) file); + mimeType = tika.detect((InputStream) file); } else if (file instanceof File) { - return tika.detect((File) file); + mimeType = tika.detect((File) file); } else { throw new IllegalArgumentException("Unsupported file type for detection"); } + + log.info("파일 MIME 타입: {}", mimeType); + return mimeType; } catch (IOException e) { log.warn("MIME 타입 감지 실패, 기본값 사용: binary/octet-stream", e); throw new CustomException(ErrorCode.MIMETYPE_DETECTION_FAILED.getCode(), ErrorCode.MIMETYPE_DETECTION_FAILED.getMsg()); } } + + public File convertMultipartToFile(MultipartFile multipartFile) { + try { + File tempFile = File.createTempFile("temp_", multipartFile.getOriginalFilename()); + multipartFile.transferTo(tempFile); + return tempFile; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public String detectFileTypeFromS3(String s3Key, String bucketName) { + try { + URL url = new URL("https://" + bucketName + ".s3.amazonaws.com/" + s3Key); + URLConnection connection = url.openConnection(); + return connection.getContentType(); + } catch (IOException e) { + log.error("파일 타입 감지 실패: {}", s3Key, e); + return "application/octet-stream"; + } + } + + } \ No newline at end of file diff --git a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/S3Service.java b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/S3Service.java index 51cd8063..404ecdc3 100644 --- a/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/S3Service.java +++ b/src/backend/file_server/src/main/java/com/jootalkpia/file_server/service/S3Service.java @@ -1,21 +1,30 @@ package com.jootalkpia.file_server.service; +import static com.jootalkpia.file_server.controller.FileController.RESPONSE_TIMES; +import static com.jootalkpia.file_server.controller.FileController.UPLOAD_TIME_TRACKER; + +import com.jootalkpia.file_server.entity.FilesEntity; import com.jootalkpia.file_server.exception.common.CustomException; import com.jootalkpia.file_server.exception.common.ErrorCode; +import com.jootalkpia.file_server.repository.FileRepository; import java.util.Arrays; +import java.util.Comparator; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Service; import org.springframework.web.multipart.MultipartFile; import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncRequestBody; 
import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.*; import java.io.*; -import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.List; @@ -26,7 +35,9 @@ public class S3Service { private final S3Client s3Client; + private final S3AsyncClient s3AsyncClient; private final FileTypeDetector fileTypeDetector; + private final FileRepository fileRepository; @Value("${spring.cloud.aws.s3.bucket}") private String bucketName; @@ -34,6 +45,150 @@ public class S3Service { @Value("${spring.cloud.aws.region.static}") private String region; + public String makeKey(Long fileId, String mimeType) { + String filePath; + if (mimeType.startsWith("video/")) { + filePath = "videos/"; + } else filePath = "images/"; + return filePath + fileId; + } + + public String initiateMultipartUpload(Long fileId, String mimeType) { + + String s3Key = makeKey(fileId, mimeType); + + log.info("initialized with s3key : {}", s3Key); + + CreateMultipartUploadRequest createRequest = CreateMultipartUploadRequest.builder() + .bucket(bucketName) + .key(s3Key) + .build(); + + try { + CompletableFuture createResponse = s3AsyncClient.createMultipartUpload(createRequest); + String uploadId = createResponse.join().uploadId(); + log.info("S3 멀티파트 업로드 초기화: {}, uploadId: {}", fileId, uploadId); + return uploadId; + } catch (Exception e) { + log.error("S3 멀티파트 업로드 초기화 실패: {}", fileId, e); + throw new CustomException(ErrorCode.FILE_PROCESSING_FAILED.getCode(), "S3 멀티파트 업로드 초기화 실패"); + } + } + + public CompletableFuture asyncUploadPartToS3(Long fileId, String uploadId, int partNumber, byte[] chunkData, String s3Key) { + UploadPartRequest uploadPartRequest = UploadPartRequest.builder() + .bucket(bucketName) + .key(s3Key) + .uploadId(uploadId) + .partNumber(partNumber) + .build(); + + return s3AsyncClient.uploadPart(uploadPartRequest, AsyncRequestBody.fromBytes(chunkData)) + .thenApply(uploadPartResponse -> { + CompletedPart completedPart = CompletedPart.builder() + .partNumber(partNumber) + .eTag(uploadPartResponse.eTag()) + .build(); + log.info("청크 업로드 완료 - 파트 번호: {}", partNumber); + return completedPart; + }).exceptionally(ex -> { + log.error("청크 업로드 실패 - 파트 번호: {}, 이유: {}", partNumber, ex.getMessage(), ex); + throw new CustomException(ErrorCode.FILE_PROCESSING_FAILED.getCode(), "청크 업로드 실패"); + }); + } + + public void completeMultipartUpload(Long fileId, String uploadId, List completedParts, String mimeType) { + String s3Key = makeKey(fileId, mimeType); + + completedParts.sort(Comparator.comparingInt(CompletedPart::partNumber)); + + CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder() + .parts(completedParts) + .build(); + + CompleteMultipartUploadRequest completeRequest = CompleteMultipartUploadRequest.builder() + .bucket(bucketName) + .key(s3Key) + .uploadId(uploadId) + .multipartUpload(completedMultipartUpload) + .build(); + + s3AsyncClient.completeMultipartUpload(completeRequest) + .thenAccept(completeMultipartUploadResponse -> { + log.info("S3 멀티파트 업로드 완료: {}", s3Key); + log.info("source {}", bucketName + s3Key); + + // Content-Type 설정을 위한 CopyObjectRequest 추가 + CopyObjectRequest copyObjectRequest = CopyObjectRequest.builder() + .sourceBucket(bucketName) + .sourceKey(s3Key) + .destinationBucket(bucketName) + .destinationKey(s3Key) + 
.contentType(mimeType) + .metadataDirective(MetadataDirective.REPLACE.toString()) + .build(); + + s3AsyncClient.copyObject(copyObjectRequest) + .thenAccept(copyObjectResponse -> { + log.info("S3 Content-Type 설정 완료: {}, 타입: {}", s3Key, mimeType); + + // FilesEntity 가져오기 및 URL 업데이트 + FilesEntity filesEntity = fileRepository.findById(fileId) + .orElseThrow(() -> new CustomException(ErrorCode.FILE_NOT_FOUND.getCode(), "해당 ID의 파일을 찾을 수 없습니다.")); + + filesEntity.setUrl(completeMultipartUploadResponse.location()); + fileRepository.save(filesEntity); + + // ✅ 업로드 완료 시간 기록 + long endTime = System.currentTimeMillis(); + log.info("✅ 파일 병합 완료 시간 (Unix Timestamp): {}, fileId: {}", endTime / 1000, fileId); + log.info("✅ 파일 병합 완료 시간 (UTC): {}", java.time.Instant.now()); + + // ✅ 시작 시간 가져오기 및 총 소요 시간 계산 + Long startTime = UPLOAD_TIME_TRACKER.get(fileId); + if (startTime != null) { + long totalTime = endTime - startTime; + double totalTimeInSeconds = totalTime / 1000.0; + log.info("\n--- 결과 요약 ---"); + log.info("총 소요 시간: {} 초, fileId: {}", totalTimeInSeconds, fileId); + + // ✅ 응답 시간 통계 계산 + double averageResponseTime = RESPONSE_TIMES.stream().mapToLong(Long::longValue).average().orElse(0.0); + long maxResponseTime = RESPONSE_TIMES.stream().mapToLong(Long::longValue).max().orElse(0L); + long minResponseTime = RESPONSE_TIMES.stream().mapToLong(Long::longValue).min().orElse(0L); + + // ✅ 성공률 및 실패율 계산 (파일 1개 기준) + int totalRequests = RESPONSE_TIMES.size(); + int successCount = totalRequests; + int failureCount = 0; + double successRate = (successCount / (double) totalRequests) * 100; + double failureRate = (failureCount / (double) totalRequests) * 100; + + // ✅ 결과 요약 출력 + log.info("\n--- 결과 요약 ---"); + log.info("총 소요 시간: {} 초", totalTimeInSeconds); + log.info("성공률: {}%", successRate); + log.info("실패율: {}%", failureRate); + + // ✅ UPLOAD_TIME_TRACKER 및 RESPONSE_TIMES 초기화 + UPLOAD_TIME_TRACKER.remove(fileId); + RESPONSE_TIMES.clear(); + } else { + log.warn("❌ 시작 시간 정보 없음 - fileId: {}", fileId); + } + }).exceptionally(ex -> { + log.error("S3 Content-Type 설정 실패: {}", s3Key, ex); + throw new CustomException(ErrorCode.CONTENT_TYPE_SETTING_FAILED.getCode(), "Content-Type 설정 실패"); + }); + + }).exceptionally(ex -> { + log.error("S3 멀티파트 업로드 병합 실패: {}", s3Key, ex); + throw new CustomException(ErrorCode.CHUNK_MERGING_FAILED.getCode(), ErrorCode.CHUNK_MERGING_FAILED.getMsg()); + }); + } + + + // 멀티파트 업로드 방식으로 S3에 파일 업로드 public String uploadFileMultipart(File file, String folder, Long fileId) { String key = folder + fileId; @@ -122,15 +277,18 @@ private void abortMultipartUpload(String bucket, String key, String uploadId) { public String uploadFile(MultipartFile file, String folder, Long fileId) { - Path tempFile = null; + java.nio.file.Path tempPath = null; log.info("Ready to upload file to S3 bucket: {}", bucketName); try { // S3에 저장될 파일 키 생성 String key = folder + fileId; - // 임시 파일 생성 - tempFile = Files.createTempFile("temp-", ".tmp"); - file.transferTo(tempFile.toFile()); + // 임시 파일 생성 (Path -> File 변환) + tempPath = java.nio.file.Files.createTempFile("temp-", ".tmp"); + java.io.File tempFile = tempPath.toFile(); + + // MultipartFile -> File 전송 + file.transferTo(tempFile); // S3에 업로드 s3Client.putObject( @@ -139,7 +297,7 @@ public String uploadFile(MultipartFile file, String folder, Long fileId) { .key(key) .contentType(file.getContentType()) .build(), - tempFile); + tempPath); log.info("파일 업로드 완료 - S3 Key: {}", "https://" + bucketName + ".s3." + region + ".amazonaws.com/" + key); return "https://" + bucketName + ".s3." 
+ region + ".amazonaws.com/" + key; @@ -159,9 +317,9 @@ public String uploadFile(MultipartFile file, String folder, Long fileId) { } finally { // 임시 파일 삭제 try { - if (tempFile != null && Files.exists(tempFile)) { - Files.delete(tempFile); - log.info("임시 파일 삭제 완료: {}", tempFile); + if (tempPath != null && java.nio.file.Files.exists(tempPath)) { + java.nio.file.Files.delete(tempPath); + log.info("임시 파일 삭제 완료: {}", tempPath); } } catch (IOException e) { log.warn("임시 파일 삭제 실패: {}", e.getMessage(), e); @@ -169,6 +327,7 @@ public String uploadFile(MultipartFile file, String folder, Long fileId) { } } + public ResponseInputStream downloadFile(String folder, Long fileId) { String key = folder + "/" + fileId;