Files
medusa-store/packages/core/framework/package.json
Harminder Virk cf0297f74a feat: implement stream based processing of the files (#12574)
Fixes: FRMW-2960

This PR adds support for processing large CSV files by breaking them into chunks and processing one chunk at a time. Here is how it works in a nutshell (a rough sketch of the chunking logic follows the list):

- The CSV file is read as a stream, and each chunk of the stream is one CSV row.
- We read up to 1,000 rows (plus a few more to ensure the variants of a product are not split across multiple chunks).
- Each chunk is then normalized using the `CSVNormalizer` and validated using Zod schemas. If there is an error, the entire process is aborted and the existing chunks are deleted.
- Each chunk is written to a JSON file, so that we can process it later (after the user confirms) without re-parsing or re-validating the CSV file.
- The confirmation process then consumes one chunk at a time and creates/updates products using the `batchProducts` workflow.
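
A minimal sketch of the chunking step, assuming the `csv-parse` package and a hypothetical `Product Handle` column as the grouping key; the constant, function, and file names are illustrative, not the actual implementation, and the `CSVNormalizer`/Zod validation step is omitted for brevity:

```ts
import { createReadStream } from "node:fs"
import { writeFile } from "node:fs/promises"
import { parse } from "csv-parse"

const CHUNK_SIZE = 1000 // soft limit per chunk (hypothetical constant)

// Streams the CSV and writes ~1000-row chunks to JSON files. A chunk is
// only cut at a product-handle boundary, so a product's variants are
// never split across chunks; this is why a chunk can hold "a few more"
// than 1,000 rows.
async function splitIntoChunks(csvPath: string): Promise<number> {
  const parser = createReadStream(csvPath).pipe(
    parse({ columns: true }) // each emitted record is one CSV row
  )

  let rows: Record<string, string>[] = []
  let chunkIndex = 0
  let lastHandle: string | undefined

  const flush = async () => {
    if (!rows.length) return
    // Persist the chunk so the confirmation step can consume it later
    // without re-parsing or re-validating the CSV.
    await writeFile(`chunk-${++chunkIndex}.json`, JSON.stringify(rows))
    rows = []
  }

  for await (const row of parser) {
    const handle = row["Product Handle"]
    // Past the soft limit, cut only when the product handle changes.
    if (rows.length >= CHUNK_SIZE && handle !== lastHandle) {
      await flush()
    }
    rows.push(row)
    lastHandle = handle
  }
  await flush()

  return chunkIndex // number of chunk files written
}
```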

## To resume or not to resume processing of chunks

Let's imagine that, while processing chunks, chunk 3 leads to a database error. By this point, we have already processed the first two chunks. How do we deal with this situation? The options are:

- We store which chunk failed, and during the re-upload we skip the chunks before it. In my conversation with @olivermrbl, we discovered that resuming would have to rely on certain assumptions if we decide to implement it (a sketch of this decision follows the list).
   - What if a user updates CSV rows that are part of already-processed chunks? Those changes will be ignored, and the user will never notice.
   - Resuming only works if the file name stays the same. What if they made changes and saved the file with "Save as - New name"? In that case, we will process the entire file anyway.
   - We will have to fetch the old workflow from the workflow engine using some `ilike` search, so that we can see at which chunk the last run failed for the given file.
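
To make the trade-off concrete, here is a hypothetical sketch of that resume decision; `PriorImportRun` and `resumeFromChunk` are invented for illustration and are not part of the workflow engine's API:

```ts
// Hypothetical shape of a prior import run, as the workflow engine
// might record it; not an actual Medusa type.
interface PriorImportRun {
  fileName: string
  lastCompletedChunk: number // 0 when no chunk completed
  failed: boolean
}

// Returns the chunk to resume from for a re-uploaded file. It starts
// over at chunk 1 unless a failed run for the *same* file name exists,
// which is exactly the fragile assumption above: "Save as - New name"
// defeats the lookup, and edits to rows in chunks that already
// completed are silently ignored.
function resumeFromChunk(
  fileName: string,
  priorRuns: PriorImportRun[]
): number {
  const match = priorRuns.find(
    (run) => run.failed && run.fileName === fileName
  )
  return match ? match.lastCompletedChunk + 1 : 1
}
```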

Co-authored-by: Carlos R. L. Rodrigues <37986729+carlos-r-l-rodrigues@users.noreply.github.com>
2025-05-29 05:42:16 +00:00

{
  "name": "@medusajs/framework",
  "version": "2.8.3",
  "description": "Framework",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "files": [
    "dist",
    "!dist/**/__tests__",
    "!dist/**/__mocks__",
    "!dist/**/__fixtures__"
  ],
  "bin": {
    "medusa-mikro-orm": "./dist/mikro-orm-cli/bin.js"
  },
  "exports": {
    ".": "./dist/index.js",
    "./config": "./dist/config/index.js",
    "./logger": "./dist/logger/index.js",
    "./database": "./dist/database/index.js",
    "./subscribers": "./dist/subscribers/index.js",
    "./workflows": "./dist/workflows/index.js",
    "./links": "./dist/links/index.js",
    "./jobs": "./dist/jobs/index.js",
    "./http": "./dist/http/index.js",
    "./telemetry": "./dist/telemetry/index.js",
    "./feature-flags": "./dist/feature-flags/index.js",
    "./utils": "./dist/utils/index.js",
    "./types": "./dist/types/index.js",
    "./build-tools": "./dist/build-tools/index.js",
    "./orchestration": "./dist/orchestration/index.js",
    "./workflows-sdk": "./dist/workflows-sdk/index.js",
    "./workflows-sdk/composer": "./dist/workflows-sdk/composer.js",
    "./modules-sdk": "./dist/modules-sdk/index.js",
    "./migrations": "./dist/migrations/index.js"
  },
  "engines": {
    "node": ">=20"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/medusajs/medusa",
    "directory": "packages/core/framework"
  },
  "publishConfig": {
    "access": "public"
  },
  "author": "Medusa",
  "license": "MIT",
  "scripts": {
    "watch": "tsc --watch",
    "watch:test": "tsc --watch",
    "build": "rimraf dist && tsc --build",
    "test": "jest --runInBand --bail --forceExit -- src/**/__tests__/**/*.ts"
  },
  "devDependencies": {
    "@aws-sdk/client-dynamodb": "^3.218.0",
    "@medusajs/cli": "2.8.3",
    "@mikro-orm/core": "6.4.3",
    "@mikro-orm/knex": "6.4.3",
    "@mikro-orm/migrations": "6.4.3",
    "@mikro-orm/postgresql": "6.4.3",
    "@swc/core": "^1.7.28",
    "@swc/jest": "^0.2.36",
    "@types/cors": "^2.8.17",
    "@types/jsonwebtoken": "^8.5.9",
    "awilix": "^8.0.1",
    "connect-dynamodb": "^3.0.5",
    "ioredis": "^5.4.1",
    "jest": "^29.7.0",
    "pg": "^8.13.0",
    "rimraf": "^3.0.2",
    "supertest": "^4.0.2",
    "typescript": "^5.6.2",
    "vite": "^5.4.14"
  },
  "dependencies": {
    "@jercle/yargonaut": "^1.1.5",
    "@medusajs/modules-sdk": "2.8.3",
    "@medusajs/orchestration": "2.8.3",
    "@medusajs/telemetry": "2.8.3",
    "@medusajs/types": "2.8.3",
    "@medusajs/utils": "2.8.3",
    "@medusajs/workflows-sdk": "2.8.3",
    "@opentelemetry/api": "^1.9.0",
    "@types/express": "^4.17.17",
    "chokidar": "^3.4.2",
    "compression": "1.7.4",
    "connect-redis": "5.2.0",
    "cookie-parser": "^1.4.6",
    "cors": "^2.8.5",
    "express": "^4.21.0",
    "express-session": "^1.17.3",
    "glob": "7.2.3",
    "jsonwebtoken": "^9.0.2",
    "lodash": "4.17.21",
    "morgan": "^1.9.1",
    "path-to-regexp": "^0.1.10",
    "tsconfig-paths": "^4.2.0",
    "zod": "3.22.4",
    "zod-validation-error": "^3.4.1"
  },
  "peerDependencies": {
    "@aws-sdk/client-dynamodb": "^3.218.0",
    "@medusajs/cli": "2.8.3",
    "@mikro-orm/cli": "6.4.3",
    "@mikro-orm/core": "6.4.3",
    "@mikro-orm/knex": "6.4.3",
    "@mikro-orm/migrations": "6.4.3",
    "@mikro-orm/postgresql": "6.4.3",
    "awilix": "^8.0.1",
    "connect-dynamodb": "^3.0.5",
    "ioredis": "^5.4.1",
    "pg": "^8.13.0",
    "vite": "^5.4.14"
  },
  "peerDependenciesMeta": {
    "@aws-sdk/client-dynamodb": {
      "optional": true
    },
    "@mikro-orm/cli": {
      "optional": true
    },
    "connect-dynamodb": {
      "optional": true
    },
    "ioredis": {
      "optional": true
    },
    "vite": {
      "optional": true
    }
  }
}
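
For context on the `exports` map above: consumers import the framework through these subpaths, which Node resolves to the corresponding `dist` entry points. The named imports below are common examples of `@medusajs/framework` usage, shown purely as illustration:

```ts
// "@medusajs/framework/utils" resolves to "./dist/utils/index.js",
// "@medusajs/framework/logger" to "./dist/logger/index.js", etc.
import { defineConfig } from "@medusajs/framework/utils"
import { logger } from "@medusajs/framework/logger"
```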