stream#Readable JavaScript Examples

The following examples show how to use stream#Readable, the Readable stream class from Node.js's stream module. They are drawn from several open source projects; the source file and project for each example are listed above its code.
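As a quick reference before the project examples, here is a minimal sketch (not taken from any of the projects below) of the two ways a Readable most often appears in them: constructing one with a custom read() implementation and pushing data into it, and wrapping a string or iterable with Readable.from().
const { Readable } = require('stream');

// 1. Construct a Readable and push data into it manually.
const manual = new Readable({
  read() {
    // no-op: data is pushed from outside via manual.push()
  },
});
manual.push('hello ');
manual.push('world');
manual.push(null); // null signals end-of-stream

// 2. Wrap a string, array, or (async) iterable with Readable.from().
const wrapped = Readable.from(['hello ', 'world']);

wrapped.on('data', (chunk) => process.stdout.write(chunk));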
Example #1
Source File: analysis-manager.js    From OpenEDR with GNU General Public License v3.0
/**
   * @method analyze
   * @description Analyze a file.
   * @param {Readable} file A file's Readable stream.
   * @param {String} [codeItemType] The type of the binary file uploaded, can be either 'file' or 'memory_module'. Default: `'file'`.
   * @param {Boolean} [disableDynamicExecution] Disable Intezer Analyze's Dynamic Execution process. Default: `false`
   * @param {Boolean} [disableStaticExtraction] Disable Intezer Analyze's Static Extraction process. Default: `false`
   * @returns {Promise<String>} The analysis' URL.
   */
  async analyze(file, ...options) {
    return raw.analyze(await this.client.token.get(), file, ...options);
  }
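A hypothetical call to the method above, assuming an analysis manager instance named `manager` (its construction is not shown here); the sample path is also an assumption:
const { createReadStream } = require('fs');

// Hypothetical usage: `manager` and the sample path are assumptions.
const fileStream = createReadStream('./sample.bin');
manager
  .analyze(fileStream, 'file', false, false)
  .then((url) => console.log('Analysis URL:', url))
  .catch(console.error);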
Example #2
Source File: index.js    From OpenEDR with GNU General Public License v3.0
/**
   * @method analyze
   * @description A shortcut for `Client.analysis.analyze()` that converts a file path to a Readable stream beforehand.
   * @param {String|Readable} file The file's path or Readable stream.
   * @param  {...any} options
   * @returns {Promise<String>} The analysis' URL.
   */
  async analyze(file, ...options) {
    if (!(file instanceof Readable) && typeof file == 'string')
      file = createReadStream(file);

    return this.analysis.analyze(file, ...options);
  }
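Because the shortcut above accepts either a path or a Readable, a caller can pass a plain path string. A hypothetical call, assuming a constructed `client` instance:
// Hypothetical usage: `client` construction is not shown here.
client
  .analyze('./sample.bin')
  .then((url) => console.log('Analysis URL:', url))
  .catch(console.error);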
Example #3
Source File: utils.js    From rally-core-addon with Mozilla Public License 2.0
/**
 * Generate a Rally test study add-on.
 *
 * @param {String} directory
 *        The directory in which to create the add-on file.
 * @param {Object} [options={}]
 *        An optional set of options for the study.
 * @param {String} [options.addonId="[email protected]"]
 *        A string containing the addon id to use for the study.
 * @param {String} [options.backgroundScript=STUDY_BACKGROUND_SCRIPT]
 *        A string containing the code for the background script.
 *
 * @return {String} the full path of the addon file.
 */
async function generateTestStudyAddon(
  directory,
  options={}
) {
  const addonId =
    options.addonId || "[email protected]";
  const manifest = `
{
  "manifest_version": 2,
  "name": "Rally Integration Test Add-on",
  "version": "1.0",
  "applications": {
    "gecko": {
      "id": "${addonId}",
      "strict_min_version": "84.0a1"
    }
  },
  "permissions": [],
  "background": {
    "page": "background.html"
  }
}
`;
  let tempFile =
    path.join(directory, "test-rally-study.xpi");

  const backgroundScript =
    options.backgroundScript || STUDY_BACKGROUND_SCRIPT;

  var output = fs.createWriteStream(tempFile);
  var archive = archiver("zip", { store: true });
  archive.on("error", err => { throw err; });
  archive.pipe(output);

  // For this to be a valid study add-on, we need: a manifest,
  // rally.js and a background script.
  await archive
    .append(Readable.from(manifest), { name: "manifest.json" })
    .append(Readable.from(STUDY_BACKGROUND_PAGE), { name: "background.html" })
    .append(
      Readable.from(backgroundScript), { name: "background.js" })
    .append(
      fs.createReadStream("./support/rally.js"), { name: 'rally.js' })
    .finalize();

  return tempFile;
}
Example #4
Source File: stream.js    From nanoexpress with Apache License 2.0
export default function requestStream(req, res) {
  const cache = [];
  const stream = new Readable({
    read() {
      // no-op: data is pushed into the stream from the res.onData handler below
    }
  });
  req.stream = stream;

  res.onData((chunk, isLast) => {
    cache[0] = Buffer.from(chunk);

    stream.push(Buffer.concat(cache));

    if (isLast) {
      stream.push(null);
      cache[0] = null;
    }
  });
}
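A hedged sketch of how downstream code might consume the `req.stream` attached above, since a Node Readable is also an async iterable; the `collectBody` helper is an assumption, not part of nanoexpress:
// Hypothetical helper: drain req.stream into a single Buffer.
async function collectBody(req) {
  const chunks = [];
  for await (const chunk of req.stream) {
    chunks.push(chunk);
  }
  return Buffer.concat(chunks);
}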
Example #5
Source File: normalizers.spec.js    From nanoexpress with Apache License 2.0
describe('body normalize', () => {
  it('body normalize non-empty', async () => {
    const stream = new Readable({
      read() {
        // mock read
      }
    });
    const fakeReq = {
      stream,
      headers: { 'content-type': 'application/json' }
    };
    const fakeRes = {
      onAborted() {
        // mock handler
      }
    };

    stream.push(Buffer.concat([Buffer.from('fake body')]));
    setTimeout(() => stream.push(null), 50);

    await body(fakeReq, fakeRes);
    expect(fakeReq.body).toStrictEqual(Buffer.from('fake body'));
  });
  it('body normalize empty', async () => {
    const fakeReq = {};

    expect(await body(fakeReq)).toBe(undefined);
  });
});
Example #6
Source File: index.js    From kit with MIT License
/** @type {import('@sveltejs/kit/node').getRequest} */
export async function getRequest(base, req) {
	let headers = /** @type {Record<string, string>} */ (req.headers);
	if (req.httpVersionMajor === 2) {
		// we need to strip out the HTTP/2 pseudo-headers because node-fetch's
		// Request implementation doesn't like them
		// TODO is this still true with Node 18
		headers = Object.assign({}, headers);
		delete headers[':method'];
		delete headers[':path'];
		delete headers[':authority'];
		delete headers[':scheme'];
	}

	const request = new Request(base + req.url, {
		method: req.method,
		headers,
		body: get_raw_body(req)
	});

	request.formData = async () => {
		return new NodeFetchRequest(request.url, {
			method: request.method,
			headers: request.headers,
			// @ts-expect-error TypeScript doesn't understand that ReadableStream implements Symbol.asyncIterator
			body: request.body && Readable.from(request.body)
		}).formData();
	};

	return request;
}
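A hypothetical way to exercise getRequest from a plain Node HTTP server, assuming getRequest is imported from the module above; the port and origin are assumptions:
import { createServer } from 'http';

// Hypothetical usage: forward incoming Node requests through getRequest.
createServer(async (req, res) => {
  const request = await getRequest('http://localhost:3000', req);
  console.log(request.method, request.url);
  res.end('ok');
}).listen(3000);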
Example #7
Source File: debug.js    From aresrpg with MIT License
/**
 * @param {{ world: any, app: import('fastify').FastifyInstance }} param
 */
function behavior({ world, app }) {
  const serializer = new XMLSerializer()

  const behavior_trees = Object.entries(trees).map(([type, tree]) => ({
    id: type,
    name: Entities[type].displayName,
    tree: serializer.serializeToString(tree),
    instances: world.mobs.all
      .filter(mob => mob.type === type)
      .map(({ entity_id }) => ({
        id: entity_id,
      })),
  }))

  app.get('/behavior', async () => behavior_trees)

  const instances = world.mobs.all.map(({ entity_id }) => ({
    entity_id,
    current: {},
    stream: new PassThrough({ objectMode: true }),
  }))

  app.get('/behavior/:id', (request, reply) => {
    reply.type('text/event-stream')
    reply.header('Connection', 'keep-alive')

    const format = data => `data: ${JSON.stringify(data)}\n\n`

    /** @type {Object} */
    const { params } = request

    const instance = instances.find(
      ({ entity_id }) => entity_id === Number(params.id)
    )

    const stream = new PassThrough()

    for (const data of Object.entries(instance.current)) {
      stream.write(format(data))
    }

    Readable.from(
      aiter(instance.stream[Symbol.asyncIterator]()).map(format)
    ).pipe(stream)

    reply.send(stream)
  })

  const statuses = {
    [SUCCESS]: 'SUCCESS',
    [FAILURE]: 'FAILURE',
    [RUNNING]: 'RUNNING',
  }

  return ({ context: { path, entity_id }, result }) => {
    const status = statuses[result.status]

    const instance = instances.find(({ entity_id: id }) => id === entity_id)

    const key = path
      .split('.')
      .slice(1) // Skip tree.
      .filter((e, i) => i % 2 === 1)
      .join('.')

    instance.current = {
      ...instance.current,
      [key]: status,
    }

    instance.stream.write([key, status])
  }
}
Example #8
Source File: analyze.js    From OpenEDR with GNU General Public License v3.0
/**
 * Post a file for analysis.
 *
 * @function analyze
 * @param {String} accessToken A valid API Access token. Use {@link module:Intezer.raw} to get one.
 * @param {Readable} fileStream The file's Readable stream.
 * @param {String} [codeItemType] The type of the binary file uploaded, can be either 'file' or 'memory_module'. Default: `'file'`.
 * @param {Boolean} [disableDynamicExecution] Disable Intezer Analyze's Dynamic Execution process. Default: `false`
 * @param {Boolean} [disableStaticExtraction] Disable Intezer Analyze's Static Extraction process. Default: `false`
 * @returns {Promise<String>} Analysis URL.
 *
 * @see https://analyze.intezer.com/api/docs/documentation#post-analyze
 */
function analyze(
  accessToken,
  fileStream,
  codeItemType = 'file',
  disableDynamicExecution = false,
  disableStaticExtraction = false
) {
  return new Promise((resolve, reject) => {
    if (!accessToken || typeof accessToken !== 'string')
      return reject(
        new Error('No Access Token provided! Refer to documentation.')
      );

    if (!fileStream || !(fileStream instanceof Readable))
      return reject(new Error('No file stream provided! Refer to documentation.'));

    const form = FormData();

    const options = {
      hostname: 'analyze.intezer.com',
      protocol: 'https:',
      port: 443,
      path: '/api/v2-0/analyze',
      method: 'POST',
      headers: {
        Authorization: `Bearer ${accessToken}`,
        'Content-Type': form.getHeaders()['content-type'],
      },
    };

    form.append('file', fileStream);
    form.append('code_item_type', codeItemType.toString());
    form.append(
      'disable_dynamic_execution',
      disableDynamicExecution.toString()
    );
    form.append(
      'disable_static_extraction',
      disableStaticExtraction.toString()
    );

    form.submit(options, (err, res) => {
      if (err) return reject(err);

      let data = '';

      res.on('data', (d) => {
        data += d;
      });

      res.on('end', () => {
        // console.log(JSON.parse(data))
        if (res.statusCode === 201) return resolve(JSON.parse(data).result_url);
        reject([res.statusCode, JSON.parse(data)]); // Returns beautified error status
      });
    });
  });
}
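A hypothetical call to the raw function above; the access token placeholder and file path are assumptions:
const { createReadStream } = require('fs');

// Hypothetical usage: upload a local file for analysis.
analyze('<access-token>', createReadStream('./sample.bin'), 'file')
  .then((resultUrl) => console.log(resultUrl))
  .catch(console.error);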
Example #9
Source File: S3Store.js    From reaction-file-collections-sa-s3 with MIT License
/**
   * This retrieves objects from S3 and sends them to reaction-file-collections as a readable stream.
   * Since the whole point of using S3 is usually to serve your content's URLs directly or through a CDN,
   * this might not be what you're looking for. It's here to preserve reaction-file-collections' default
   * behavior.
   */
  async _getReadStream(fileKey, { start: startPos, end: endPos } = {}) {
    debug("S3Store _getReadStream");

    const opts = {
      Bucket: process.env.AWS_S3_BUCKET,
      Key: fileKey._id
    };

    // Add range if this should be a partial read
    if (typeof startPos === "number" && typeof endPos === "number") {
      opts.Range = `bytes=${startPos}-${endPos}`;
    }

    debug("S3Store _getReadStream opts:", opts);

    const object = await this.s3.getObject(opts).promise();

    debug("S3Store _getReadStream got object:", object);

    let totalTransferredData = 0;

    const stream = new Readable({
      read: (size) => {
        debug(`S3Store read body from ${totalTransferredData} to ${totalTransferredData + size}`);
        const body = object.Body.slice(totalTransferredData, totalTransferredData + size);

        totalTransferredData += size;

        debug(`S3Store _getReadStream transferred ${totalTransferredData}`);

        stream.push(body);

        if ((typeof endPos === "number" && totalTransferredData >= endPos) || totalTransferredData >= fileKey.size) {
          debug("S3Store _getReadStream ending stream");
          stream.push(null);
        }
      }
    });

    return stream;
  }
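A hedged sketch of consuming the stream returned above, assuming a constructed S3Store instance named `store`; the key id and size are placeholders:
const fs = require('fs');

// Hypothetical usage inside an async function: stream an S3 object to disk.
async function download(store) {
  const readStream = await store._getReadStream({ _id: 'example-key', size: 1024 });
  readStream.pipe(fs.createWriteStream('/tmp/example-file'));
}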
Example #10
Source File: csv-parser.js    From medusa with MIT License
describe("CsvParser", () => {
  describe("parse", () => {
    const csvParser = new CsvParser(createContainer(), {
      columns: [],
    })

    let csvContent =
      'title,subtitle\n"T-shirt","summer tee"\n"Sunglasses","Red sunglasses"'

    let expectedProducts = [
      {
        title: "T-shirt",
        subtitle: "summer tee",
      },
      {
        title: "Sunglasses",
        subtitle: "Red sunglasses",
      },
    ]

    afterEach(() => {
      jest.clearAllMocks()
    })

    it("given a readable stream, can parse the stream content", async () => {
      const stream = Readable.from(csvContent)
      const content = await csvParser.parse(stream)

      expect(content).toEqual(expectedProducts)
    })
  })

  describe("buildData", () => {
    describe("schema validation", () => {
      class TitleValidator extends AbstractCsvValidator {
        async validate(builtLine) {
          if (/\d/.test(builtLine["title"])) {
            throw new Error("title should not contain a number")
          }
          return true
        }
      }

      const schema = {
        columns: [
          {
            name: "title",
            validator: new TitleValidator(createContainer()),
          },
          {
            name: "size",
          },
          {
            name: "height",
          },
        ],
      }

      const csvParser = new CsvParser(createContainer(), schema)

      it("given a line containing a column which is not defined in the schema, then validation should fail", async () => {
        try {
          await csvParser.buildData([
            {
              title: "sunglasses",
              size: "M",
              height: "100",
              first_name: "lebron",
            },
          ])
        } catch (err) {
          expect(err.message).toEqual(
            "Unable to treat column first_name from the csv file. No target column found in the provided schema"
          )
        }
      })

      it("given a line containing a column which does not pass a validation constraint, then validation should fail", async () => {
        try {
          await csvParser.buildData([
            { title: "contains a number 1", size: "M", height: "100" },
          ])
        } catch (err) {
          expect(err.message).toEqual("title should not contain a number")
        }
      })

      it("given a line which passes all validation constraints, then should returned validated content", async () => {
        const content = await csvParser.buildData([
          { title: "great product", size: "M", height: "100" },
        ])

        expect(content).toEqual([
          {
            title: "great product",
            size: "M",
            height: "100",
          },
        ])
      })

      it("given a line which does not provide a value for a required column, then should throw an error", async () => {
        try {
          await csvParser.buildData([{ size: "S", height: "100" }])
        } catch (err) {
          expect(err.message).toEqual(
            `Missing column(s) "title" from the given csv file`
          )
        }
      })

      it("given a line which does not provide a value for multiple required columns, then should throw an error", async () => {
        try {
          await csvParser.buildData([{ size: "S" }])
        } catch (err) {
          expect(err.message).toEqual(
            `Missing column(s) "title", "height" from the given csv file`
          )
        }
      })

      it("given a line which does not provide a value for a required column, then should throw an error", async () => {
        try {
          await csvParser.buildData([
            { title: "t-shirt", height: "100", size: "" },
          ])
        } catch (err) {
          expect(err.message).toEqual(
            `No value found for target column "size" in line 1 of the given csv file`
          )
        }
      })
    })

    describe("mapTo", () => {
      const csvParser = new CsvParser(createContainer(), {
        columns: [
          {
            name: "title",
            mapTo: "product_title",
          },
        ],
      })

      it("given a mapTo field for a column, when building data including that column, should rename the column name to what mapTo refers to", async () => {
        const content = await csvParser.buildData([{ title: "a product" }])

        expect(content).toEqual([
          {
            product_title: "a product",
          },
        ])
      })
    })

    describe("transformer", () => {
      const csvParser = new CsvParser(createContainer(), {
        columns: [
          {
            name: "title",
          },
          {
            name: "price usd",
            transform: (value) => Math.round(Number(value) * 100),
          },
        ],
      })

      it("given a transformer function for a column, when building data, should transform that column's value according to the transformation function", async () => {
        const content = await csvParser.buildData([
          { title: "medusa t-shirt", "price usd": "19.99" },
        ])

        expect(content).toEqual([
          {
            title: "medusa t-shirt",
            "price usd": 1999,
          },
        ])
      })
    })

    describe("match", () => {
      describe("regex", () => {
        const csvParser = new CsvParser(createContainer(), {
          columns: [
            {
              name: "title",
            },
            {
              name: "prices",
              match: /.*Variant Price.*/i,
              transform: (value) => Math.round(Number(value) * 100),
            },
          ],
        })

        it("given a column with the match property as regex and a transformer, when building data, should resolve that column for all entries in the line that match the regex", async () => {
          const content = await csvParser.buildData([
            {
              title: "medusa t-shirt",
              "variant price usd": "19.99",
              "variant price cad": "26.79",
              "variant price dkk": "1389",
            },
            {
              title: "medusa sunglasses",
              "variant price usd": "9.99",
              "variant price cad": "16.79",
              "variant price dkk": "389",
            },
          ])

          expect(content).toEqual([
            {
              title: "medusa t-shirt",
              "variant price usd": 1999,
              "variant price cad": 2679,
              "variant price dkk": 138900,
            },
            {
              title: "medusa sunglasses",
              "variant price usd": 999,
              "variant price cad": 1679,
              "variant price dkk": 38900,
            },
          ])
        })
      })

      describe("reducer", () => {
        const schema = {
          columns: [
            {
              name: "title",
            },
            {
              name: "prices",
              match: /.*Variant Price ([a-z]+).*/i,
              reducer: (builtLine, key, value) => {
                const [, currency_code] = key.match(
                  /.*Variant Price ([a-z]+).*/i
                )
                const existingPrices = builtLine.prices ?? []
                const price = {
                  amount: Math.round(Number(value) * 100),
                  currency_code,
                }
                return {
                  ...builtLine,
                  prices: [...existingPrices, price],
                }
              },
              validator: {
                validate: (builtLine) => {
                  const unexistingCurrency = builtLine.prices?.find(
                    (price) => !currencies[price.currency_code.toUpperCase()]
                  )
                  if (unexistingCurrency) {
                    throw new Error(
                      `wrong currency: ${unexistingCurrency.currency_code}`
                    )
                  }
                  return true
                },
              },
            },
          ],
        }
        const csvParser = new CsvParser(createContainer(), schema)

        it("given a column with match and reducer properties, when building data, should return the result of the reducer function", async () => {
          const content = await csvParser.buildData([
            {
              title: "medusa t-shirt",
              "variant price usd": "19.99",
              "variant price cad": "26.79",
              "variant price dkk": "1389",
            },
            {
              title: "medusa sunglasses",
              "variant price usd": "9.99",
              "variant price cad": "16.79",
              "variant price dkk": "389",
            },
          ])

          expect(content).toEqual([
            {
              title: "medusa t-shirt",
              prices: [
                {
                  currency_code: "usd",
                  amount: 1999,
                },
                {
                  currency_code: "cad",
                  amount: 2679,
                },
                {
                  currency_code: "dkk",
                  amount: 138900,
                },
              ],
            },
            {
              title: "medusa sunglasses",
              prices: [
                {
                  currency_code: "usd",
                  amount: 999,
                },
                {
                  currency_code: "cad",
                  amount: 1679,
                },
                {
                  currency_code: "dkk",
                  amount: 38900,
                },
              ],
            },
          ])
        })

        it("given a column with match and reducer properties, when building data, should run validation on the built data", async () => {
          try {
            await csvParser.buildData([
              {
                title: "medusa t-shirt",
                "variant price usd": "19.99",
                "variant price cad": "26.79",
                "variant price grp": "1389",
              },
              {
                title: "medusa sunglasses",
                "variant price usd": "9.99",
                "variant price cad": "16.79",
                "variant price grp": "389",
              },
            ])
          } catch (err) {
            expect(err.message).toEqual("wrong currency: grp")
          }
        })

        describe("invalid column properties", () => {
          const schema = {
            columns: [
              {
                name: "title",
              },
              {
                name: "variants",
                match: /.*Variant Price ([a-z]+).*/i,
                mapTo: "prices",
              },
            ],
          }
          const csvParser = new CsvParser(createContainer(), schema)

          it("given a column with match and mapTo property, when building data, then the mapTo property should be ignored", async () => {
            const content = await csvParser.buildData([
              {
                title: "medusa t-shirt",
                "variant price usd": "19.99",
                "variant price cad": "26.79",
                "variant price dkk": "1389",
              },
              {
                title: "medusa sunglasses",
                "variant price usd": "9.99",
                "variant price cad": "16.79",
                "variant price dkk": "389",
              },
            ])

            expect(content).toEqual([
              {
                title: "medusa t-shirt",
                "variant price usd": "19.99",
                "variant price cad": "26.79",
                "variant price dkk": "1389",
              },
              {
                title: "medusa sunglasses",
                "variant price usd": "9.99",
                "variant price cad": "16.79",
                "variant price dkk": "389",
              },
            ])
          })
        })
      })
    })
  })
})