🎉 initiate project *astro_rewrite*

sindrekjelsrud 2023-07-19 21:31:30 +02:00
parent ffd4d5e86c
commit 2ba37bfbe3
8658 changed files with 2268794 additions and 2538 deletions

12
node_modules/micromark/dev/index.d.ts generated vendored Normal file

@@ -0,0 +1,12 @@
export function micromark(
value: Value,
encoding: Encoding | null | undefined,
options?: Options | null | undefined
): string
export function micromark(
value: Value,
options?: Options | null | undefined
): string
export type Encoding = import('micromark-util-types').Encoding
export type Options = import('micromark-util-types').Options
export type Value = import('micromark-util-types').Value

55
node_modules/micromark/dev/index.js generated vendored Normal file

@@ -0,0 +1,55 @@
/**
* @typedef {import('micromark-util-types').Encoding} Encoding
* @typedef {import('micromark-util-types').Options} Options
* @typedef {import('micromark-util-types').Value} Value
*/
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
/**
* Compile markdown to HTML.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Buffer`).
* @param {Encoding | null | undefined} encoding
* Character encoding to understand `value` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Buffer`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @param {Value} value
* Markdown to parse (`string` or `Buffer`).
* @param {Options | Encoding | null | undefined} [encoding]
* Character encoding to understand `value` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*/
export function micromark(value, encoding, options) {
if (typeof encoding !== 'string') {
options = encoding
encoding = undefined
}
return compile(options)(
postprocess(
parse(options).document().write(preprocess()(value, encoding, true))
)
)
}
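
A minimal usage sketch of the `micromark` export above, assuming a Node project with this package installed; the encoding argument only matters for `Buffer` input:

import {micromark} from 'micromark'

// String input: pass options directly (second overload).
console.log(micromark('## Hello, *world*!'))
// => '<h2>Hello, <em>world</em>!</h2>'

// Buffer input: an explicit encoding may come first (first overload).
console.log(micromark(Buffer.from('*a*'), 'utf8', {allowDangerousHtml: true}))
// => '<p><em>a</em></p>'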

24
node_modules/micromark/dev/lib/compile.d.ts generated vendored Normal file

@@ -0,0 +1,24 @@
/**
* @param {CompileOptions | null | undefined} [options]
* @returns {Compile}
*/
export function compile(options?: CompileOptions | null | undefined): Compile
export type Compile = import('micromark-util-types').Compile
export type CompileContext = import('micromark-util-types').CompileContext
export type CompileData = import('micromark-util-types').CompileData
export type CompileOptions = import('micromark-util-types').CompileOptions
export type Definition = import('micromark-util-types').Definition
export type Event = import('micromark-util-types').Event
export type Handle = import('micromark-util-types').Handle
export type HtmlExtension = import('micromark-util-types').HtmlExtension
export type NormalizedHtmlExtension =
import('micromark-util-types').NormalizedHtmlExtension
export type Token = import('micromark-util-types').Token
export type Media = {
image?: boolean | undefined
labelId?: string | undefined
label?: string | undefined
referenceId?: string | undefined
destination?: string | undefined
title?: string | undefined
}

1106
node_modules/micromark/dev/lib/compile.js generated vendored Normal file

File diff suppressed because it is too large.

76
node_modules/micromark/dev/lib/constructs.d.ts generated vendored Normal file

@@ -0,0 +1,76 @@
/** @satisfies {Extension['document']} */
export const document: {
42: import('micromark-util-types').Construct
43: import('micromark-util-types').Construct
45: import('micromark-util-types').Construct
48: import('micromark-util-types').Construct
49: import('micromark-util-types').Construct
50: import('micromark-util-types').Construct
51: import('micromark-util-types').Construct
52: import('micromark-util-types').Construct
53: import('micromark-util-types').Construct
54: import('micromark-util-types').Construct
55: import('micromark-util-types').Construct
56: import('micromark-util-types').Construct
57: import('micromark-util-types').Construct
62: import('micromark-util-types').Construct
}
/** @satisfies {Extension['contentInitial']} */
export const contentInitial: {
91: import('micromark-util-types').Construct
}
/** @satisfies {Extension['flowInitial']} */
export const flowInitial: {
[-2]: import('micromark-util-types').Construct
[-1]: import('micromark-util-types').Construct
32: import('micromark-util-types').Construct
}
/** @satisfies {Extension['flow']} */
export const flow: {
35: import('micromark-util-types').Construct
42: import('micromark-util-types').Construct
45: import('micromark-util-types').Construct[]
60: import('micromark-util-types').Construct
61: import('micromark-util-types').Construct
95: import('micromark-util-types').Construct
96: import('micromark-util-types').Construct
126: import('micromark-util-types').Construct
}
/** @satisfies {Extension['string']} */
export const string: {
38: import('micromark-util-types').Construct
92: import('micromark-util-types').Construct
}
/** @satisfies {Extension['text']} */
export const text: {
[-5]: import('micromark-util-types').Construct
[-4]: import('micromark-util-types').Construct
[-3]: import('micromark-util-types').Construct
33: import('micromark-util-types').Construct
38: import('micromark-util-types').Construct
42: import('micromark-util-types').Construct
60: import('micromark-util-types').Construct[]
91: import('micromark-util-types').Construct
92: import('micromark-util-types').Construct[]
93: import('micromark-util-types').Construct
95: import('micromark-util-types').Construct
96: import('micromark-util-types').Construct
}
export namespace insideSpan {
const _null: (
| import('micromark-util-types').Construct
| {
resolveAll: import('micromark-util-types').Resolver
}
)[]
export {_null as null}
}
export namespace attentionMarkers {
const _null_1: (42 | 95)[]
export {_null_1 as null}
}
export namespace disable {
const _null_2: never[]
export {_null_2 as null}
}
export type Extension = import('micromark-util-types').Extension

101
node_modules/micromark/dev/lib/constructs.js generated vendored Normal file

@@ -0,0 +1,101 @@
/**
* @typedef {import('micromark-util-types').Extension} Extension
*/
import {
attention,
autolink,
blockQuote,
characterEscape,
characterReference,
codeFenced,
codeIndented,
codeText,
definition,
hardBreakEscape,
headingAtx,
htmlFlow,
htmlText,
labelEnd,
labelStartImage,
labelStartLink,
lineEnding,
list,
setextUnderline,
thematicBreak
} from 'micromark-core-commonmark'
import {codes} from 'micromark-util-symbol/codes.js'
import {resolver as resolveText} from './initialize/text.js'
/** @satisfies {Extension['document']} */
export const document = {
[codes.asterisk]: list,
[codes.plusSign]: list,
[codes.dash]: list,
[codes.digit0]: list,
[codes.digit1]: list,
[codes.digit2]: list,
[codes.digit3]: list,
[codes.digit4]: list,
[codes.digit5]: list,
[codes.digit6]: list,
[codes.digit7]: list,
[codes.digit8]: list,
[codes.digit9]: list,
[codes.greaterThan]: blockQuote
}
/** @satisfies {Extension['contentInitial']} */
export const contentInitial = {
[codes.leftSquareBracket]: definition
}
/** @satisfies {Extension['flowInitial']} */
export const flowInitial = {
[codes.horizontalTab]: codeIndented,
[codes.virtualSpace]: codeIndented,
[codes.space]: codeIndented
}
/** @satisfies {Extension['flow']} */
export const flow = {
[codes.numberSign]: headingAtx,
[codes.asterisk]: thematicBreak,
[codes.dash]: [setextUnderline, thematicBreak],
[codes.lessThan]: htmlFlow,
[codes.equalsTo]: setextUnderline,
[codes.underscore]: thematicBreak,
[codes.graveAccent]: codeFenced,
[codes.tilde]: codeFenced
}
/** @satisfies {Extension['string']} */
export const string = {
[codes.ampersand]: characterReference,
[codes.backslash]: characterEscape
}
/** @satisfies {Extension['text']} */
export const text = {
[codes.carriageReturn]: lineEnding,
[codes.lineFeed]: lineEnding,
[codes.carriageReturnLineFeed]: lineEnding,
[codes.exclamationMark]: labelStartImage,
[codes.ampersand]: characterReference,
[codes.asterisk]: attention,
[codes.lessThan]: [autolink, htmlText],
[codes.leftSquareBracket]: labelStartLink,
[codes.backslash]: [hardBreakEscape, characterEscape],
[codes.rightSquareBracket]: labelEnd,
[codes.underscore]: attention,
[codes.graveAccent]: codeText
}
/** @satisfies {Extension['insideSpan']} */
export const insideSpan = {null: [attention, resolveText]}
/** @satisfies {Extension['attentionMarkers']} */
export const attentionMarkers = {null: [codes.asterisk, codes.underscore]}
/** @satisfies {Extension['disable']} */
export const disable = {null: []}
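
The numeric keys in the generated `constructs.d.ts` above are these same hooks with the symbolic names resolved: positive numbers are plain character codes, and the negative keys are micromark's virtual codes (-5 CR, -4 LF, -3 CRLF, -2 horizontal tab, -1 virtual space). A quick check of the correspondence:

// 42 → list (and thematicBreak/attention elsewhere), 62 → blockQuote, 96 → codeFenced/codeText.
console.log('*'.charCodeAt(0)) // => 42 (codes.asterisk)
console.log('>'.charCodeAt(0)) // => 62 (codes.greaterThan)
console.log('`'.charCodeAt(0)) // => 96 (codes.graveAccent)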

40
node_modules/micromark/dev/lib/create-tokenizer.d.ts generated vendored Normal file

@@ -0,0 +1,40 @@
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* @param {InitialConstruct} initialize
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* @returns {TokenizeContext}
*/
export function createTokenizer(
parser: ParseContext,
initialize: InitialConstruct,
from?: Omit<Point, '_bufferIndex' | '_index'> | undefined
): TokenizeContext
export type Chunk = import('micromark-util-types').Chunk
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type ConstructRecord = import('micromark-util-types').ConstructRecord
export type Effects = import('micromark-util-types').Effects
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type ParseContext = import('micromark-util-types').ParseContext
export type Point = import('micromark-util-types').Point
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenType = import('micromark-util-types').TokenType
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Restore = () => void
export type Info = {
restore: Restore
from: number
}
/**
* Handle a successful run.
*/
export type ReturnHandle = (construct: Construct, info: Info) => void

671
node_modules/micromark/dev/lib/create-tokenizer.js generated vendored Normal file

@@ -0,0 +1,671 @@
/**
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').ConstructRecord} ConstructRecord
* @typedef {import('micromark-util-types').Effects} Effects
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').ParseContext} ParseContext
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenType} TokenType
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
/**
* @callback Restore
* @returns {void}
*
* @typedef Info
* @property {Restore} restore
* @property {number} from
*
* @callback ReturnHandle
* Handle a successful run.
* @param {Construct} construct
* @param {Info} info
* @returns {void}
*/
import createDebug from 'debug'
import {markdownLineEnding} from 'micromark-util-character'
import {push, splice} from 'micromark-util-chunked'
import {resolveAll} from 'micromark-util-resolve-all'
import {codes} from 'micromark-util-symbol/codes.js'
import {values} from 'micromark-util-symbol/values.js'
import {ok as assert} from 'uvu/assert'
const debug = createDebug('micromark')
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* @param {InitialConstruct} initialize
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* @returns {TokenizeContext}
*/
export function createTokenizer(parser, initialize, from) {
/** @type {Point} */
let point = Object.assign(
from ? Object.assign({}, from) : {line: 1, column: 1, offset: 0},
{_index: 0, _bufferIndex: -1}
)
/** @type {Record<string, number>} */
const columnStart = {}
/** @type {Array<Construct>} */
const resolveAllConstructs = []
/** @type {Array<Chunk>} */
let chunks = []
/** @type {Array<Token>} */
let stack = []
/** @type {boolean | undefined} */
let consumed = true
/**
* Tools used for tokenizing.
*
* @type {Effects}
*/
const effects = {
consume,
enter,
exit,
attempt: constructFactory(onsuccessfulconstruct),
check: constructFactory(onsuccessfulcheck),
interrupt: constructFactory(onsuccessfulcheck, {interrupt: true})
}
/**
* State and tools for resolving and serializing.
*
* @type {TokenizeContext}
*/
const context = {
previous: codes.eof,
code: codes.eof,
containerState: {},
events: [],
parser,
sliceStream,
sliceSerialize,
now,
defineSkip,
write
}
/**
* The state function.
*
* @type {State | void}
*/
let state = initialize.tokenize.call(context, effects)
/**
* Track which character we expect to be consumed, to catch bugs.
*
* @type {Code}
*/
let expectedCode
if (initialize.resolveAll) {
resolveAllConstructs.push(initialize)
}
return context
/** @type {TokenizeContext['write']} */
function write(slice) {
chunks = push(chunks, slice)
main()
// Exit if we're not done, resolve might change stuff.
if (chunks[chunks.length - 1] !== codes.eof) {
return []
}
addResult(initialize, 0)
// Otherwise, resolve, and exit.
context.events = resolveAll(resolveAllConstructs, context.events, context)
return context.events
}
//
// Tools.
//
/** @type {TokenizeContext['sliceSerialize']} */
function sliceSerialize(token, expandTabs) {
return serializeChunks(sliceStream(token), expandTabs)
}
/** @type {TokenizeContext['sliceStream']} */
function sliceStream(token) {
return sliceChunks(chunks, token)
}
/** @type {TokenizeContext['now']} */
function now() {
// This is a hot path, so we clone manually instead of `Object.assign({}, point)`
const {line, column, offset, _index, _bufferIndex} = point
return {line, column, offset, _index, _bufferIndex}
}
/** @type {TokenizeContext['defineSkip']} */
function defineSkip(value) {
columnStart[value.line] = value.column
accountForPotentialSkip()
debug('position: define skip: `%j`', point)
}
//
// State management.
//
/**
* Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
* `consume`).
* Here is where we walk through the chunks, which either include strings of
* several characters, or numerical character codes.
* The reason to do this in a loop instead of a call is so the stack can
* drain.
*
* @returns {void}
*/
function main() {
/** @type {number} */
let chunkIndex
while (point._index < chunks.length) {
const chunk = chunks[point._index]
// If we're in a buffer chunk, loop through it.
if (typeof chunk === 'string') {
chunkIndex = point._index
if (point._bufferIndex < 0) {
point._bufferIndex = 0
}
while (
point._index === chunkIndex &&
point._bufferIndex < chunk.length
) {
go(chunk.charCodeAt(point._bufferIndex))
}
} else {
go(chunk)
}
}
}
/**
* Deal with one code.
*
* @param {Code} code
* @returns {void}
*/
function go(code) {
assert(consumed === true, 'expected character to be consumed')
consumed = undefined
debug('main: passing `%s` to %s', code, state && state.name)
expectedCode = code
assert(typeof state === 'function', 'expected state')
state = state(code)
}
/** @type {Effects['consume']} */
function consume(code) {
assert(code === expectedCode, 'expected given code to equal expected code')
debug('consume: `%s`', code)
assert(
consumed === undefined,
'expected code to not have been consumed: this might be because `return x(code)` instead of `return x` was used'
)
assert(
code === null
? context.events.length === 0 ||
context.events[context.events.length - 1][0] === 'exit'
: context.events[context.events.length - 1][0] === 'enter',
'expected last token to be open'
)
if (markdownLineEnding(code)) {
point.line++
point.column = 1
point.offset += code === codes.carriageReturnLineFeed ? 2 : 1
accountForPotentialSkip()
debug('position: after eol: `%j`', point)
} else if (code !== codes.virtualSpace) {
point.column++
point.offset++
}
// Not in a string chunk.
if (point._bufferIndex < 0) {
point._index++
} else {
point._bufferIndex++
// At end of string chunk.
// @ts-expect-error Points w/ non-negative `_bufferIndex` reference
// strings.
if (point._bufferIndex === chunks[point._index].length) {
point._bufferIndex = -1
point._index++
}
}
// Expose the previous character.
context.previous = code
// Mark as consumed.
consumed = true
}
/** @type {Effects['enter']} */
function enter(type, fields) {
/** @type {Token} */
// @ts-expect-error Patch instead of assign required fields to help GC.
const token = fields || {}
token.type = type
token.start = now()
assert(typeof type === 'string', 'expected string type')
assert(type.length > 0, 'expected non-empty string')
debug('enter: `%s`', type)
context.events.push(['enter', token, context])
stack.push(token)
return token
}
/** @type {Effects['exit']} */
function exit(type) {
assert(typeof type === 'string', 'expected string type')
assert(type.length > 0, 'expected non-empty string')
const token = stack.pop()
assert(token, 'cannot close w/o open tokens')
token.end = now()
assert(type === token.type, 'expected exit token to match current token')
assert(
!(
token.start._index === token.end._index &&
token.start._bufferIndex === token.end._bufferIndex
),
'expected non-empty token (`' + type + '`)'
)
debug('exit: `%s`', token.type)
context.events.push(['exit', token, context])
return token
}
/**
* Use results.
*
* @type {ReturnHandle}
*/
function onsuccessfulconstruct(construct, info) {
addResult(construct, info.from)
}
/**
* Discard results.
*
* @type {ReturnHandle}
*/
function onsuccessfulcheck(_, info) {
info.restore()
}
/**
* Factory to attempt/check/interrupt.
*
* @param {ReturnHandle} onreturn
* @param {{interrupt?: boolean | undefined} | undefined} [fields]
*/
function constructFactory(onreturn, fields) {
return hook
/**
* Handle either an object mapping codes to constructs, a list of
* constructs, or a single construct.
*
* @param {Array<Construct> | Construct | ConstructRecord} constructs
* @param {State} returnState
* @param {State | undefined} [bogusState]
* @returns {State}
*/
function hook(constructs, returnState, bogusState) {
/** @type {Array<Construct>} */
let listOfConstructs
/** @type {number} */
let constructIndex
/** @type {Construct} */
let currentConstruct
/** @type {Info} */
let info
return Array.isArray(constructs)
? /* c8 ignore next 1 */
handleListOfConstructs(constructs)
: 'tokenize' in constructs
? // @ts-expect-error Looks like a construct.
handleListOfConstructs([constructs])
: handleMapOfConstructs(constructs)
/**
* Handle a map of constructs.
*
* @param {ConstructRecord} map
* @returns {State}
*/
function handleMapOfConstructs(map) {
return start
/** @type {State} */
function start(code) {
const def = code !== null && map[code]
const all = code !== null && map.null
const list = [
// To do: add more extension tests.
/* c8 ignore next 2 */
...(Array.isArray(def) ? def : def ? [def] : []),
...(Array.isArray(all) ? all : all ? [all] : [])
]
return handleListOfConstructs(list)(code)
}
}
/**
* Handle a list of constructs.
*
* @param {Array<Construct>} list
* @returns {State}
*/
function handleListOfConstructs(list) {
listOfConstructs = list
constructIndex = 0
if (list.length === 0) {
assert(bogusState, 'expected `bogusState` to be given')
return bogusState
}
return handleConstruct(list[constructIndex])
}
/**
* Handle a single construct.
*
* @param {Construct} construct
* @returns {State}
*/
function handleConstruct(construct) {
return start
/** @type {State} */
function start(code) {
// To do: not needed to store if there is no bogus state, probably?
// Currently doesn't work because `inspect` in document does a check
// w/o a bogus, which doesn't make sense. But it does seem to help perf
// by not storing.
info = store()
currentConstruct = construct
if (!construct.partial) {
context.currentConstruct = construct
}
// Always populated by defaults.
assert(
context.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
if (
construct.name &&
context.parser.constructs.disable.null.includes(construct.name)
) {
return nok(code)
}
return construct.tokenize.call(
// If we do have fields, create an object w/ `context` as its
// prototype.
// This allows a "live binding", which is needed for `interrupt`.
fields ? Object.assign(Object.create(context), fields) : context,
effects,
ok,
nok
)(code)
}
}
/** @type {State} */
function ok(code) {
assert(code === expectedCode, 'expected code')
consumed = true
onreturn(currentConstruct, info)
return returnState
}
/** @type {State} */
function nok(code) {
assert(code === expectedCode, 'expected code')
consumed = true
info.restore()
if (++constructIndex < listOfConstructs.length) {
return handleConstruct(listOfConstructs[constructIndex])
}
return bogusState
}
}
}
/**
* @param {Construct} construct
* @param {number} from
* @returns {void}
*/
function addResult(construct, from) {
if (construct.resolveAll && !resolveAllConstructs.includes(construct)) {
resolveAllConstructs.push(construct)
}
if (construct.resolve) {
splice(
context.events,
from,
context.events.length - from,
construct.resolve(context.events.slice(from), context)
)
}
if (construct.resolveTo) {
context.events = construct.resolveTo(context.events, context)
}
assert(
construct.partial ||
context.events.length === 0 ||
context.events[context.events.length - 1][0] === 'exit',
'expected last token to end'
)
}
/**
* Store state.
*
* @returns {Info}
*/
function store() {
const startPoint = now()
const startPrevious = context.previous
const startCurrentConstruct = context.currentConstruct
const startEventsIndex = context.events.length
const startStack = Array.from(stack)
return {restore, from: startEventsIndex}
/**
* Restore state.
*
* @returns {void}
*/
function restore() {
point = startPoint
context.previous = startPrevious
context.currentConstruct = startCurrentConstruct
context.events.length = startEventsIndex
stack = startStack
accountForPotentialSkip()
debug('position: restore: `%j`', point)
}
}
/**
* Move the current point a bit forward in the line when it's on a column
* skip.
*
* @returns {void}
*/
function accountForPotentialSkip() {
if (point.line in columnStart && point.column < 2) {
point.column = columnStart[point.line]
point.offset += columnStart[point.line] - 1
}
}
}
/**
* Get the chunks from a slice of chunks in the range of a token.
*
* @param {Array<Chunk>} chunks
* @param {Pick<Token, 'end' | 'start'>} token
* @returns {Array<Chunk>}
*/
function sliceChunks(chunks, token) {
const startIndex = token.start._index
const startBufferIndex = token.start._bufferIndex
const endIndex = token.end._index
const endBufferIndex = token.end._bufferIndex
/** @type {Array<Chunk>} */
let view
if (startIndex === endIndex) {
assert(endBufferIndex > -1, 'expected non-negative end buffer index')
assert(startBufferIndex > -1, 'expected non-negative start buffer index')
// @ts-expect-error `_bufferIndex` is used on string chunks.
view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
} else {
view = chunks.slice(startIndex, endIndex)
if (startBufferIndex > -1) {
const head = view[0]
if (typeof head === 'string') {
view[0] = head.slice(startBufferIndex)
} else {
assert(startBufferIndex === 0, 'expected `startBufferIndex` to be `0`')
view.shift()
}
}
if (endBufferIndex > 0) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view.push(chunks[endIndex].slice(0, endBufferIndex))
}
}
return view
}
/**
* Get the string value of a slice of chunks.
*
* @param {Array<Chunk>} chunks
* @param {boolean | undefined} [expandTabs=false]
* @returns {string}
*/
function serializeChunks(chunks, expandTabs) {
let index = -1
/** @type {Array<string>} */
const result = []
/** @type {boolean | undefined} */
let atTab
while (++index < chunks.length) {
const chunk = chunks[index]
/** @type {string} */
let value
if (typeof chunk === 'string') {
value = chunk
} else
switch (chunk) {
case codes.carriageReturn: {
value = values.cr
break
}
case codes.lineFeed: {
value = values.lf
break
}
case codes.carriageReturnLineFeed: {
value = values.cr + values.lf
break
}
case codes.horizontalTab: {
value = expandTabs ? values.space : values.ht
break
}
case codes.virtualSpace: {
if (!expandTabs && atTab) continue
value = values.space
break
}
default: {
assert(typeof chunk === 'number', 'expected number')
// Currently only replacement character.
value = String.fromCharCode(chunk)
}
}
atTab = chunk === codes.horizontalTab
result.push(value)
}
return result.join('')
}
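
A sketch of driving a tokenizer by hand with the pieces above (import paths are relative to this `dev` folder):

import {parse} from './lib/parse.js'
import {preprocess} from './lib/preprocess.js'

// `write` returns `[]` until it sees the EOF chunk, then the resolved events.
const events = parse().document().write(preprocess()('*hi*', undefined, true))
// Events are flat `['enter' | 'exit', token, context]` tuples.
for (const [kind, token] of events) {
  console.log(kind, token.type)
}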

5
node_modules/micromark/dev/lib/example.d.ts generated vendored Normal file

@@ -0,0 +1,5 @@
export default function myRemarkPluginAddingComp(
this: import('unified').Processor<void, import('mdast').Root, void, void>
):
| void
| import('unified').Transformer<import('mdast').Root, import('mdast').Root>

7
node_modules/micromark/dev/lib/initialize/content.d.ts generated vendored Normal file

@@ -0,0 +1,7 @@
/** @type {InitialConstruct} */
export const content: InitialConstruct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext

97
node_modules/micromark/dev/lib/initialize/content.js generated vendored Normal file

@@ -0,0 +1,97 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
/** @type {InitialConstruct} */
export const content = {tokenize: initializeContent}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeContent(effects) {
const contentStart = effects.attempt(
this.parser.constructs.contentInitial,
afterContentStartConstruct,
paragraphInitial
)
/** @type {Token} */
let previous
return contentStart
/** @type {State} */
function afterContentStartConstruct(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return factorySpace(effects, contentStart, types.linePrefix)
}
/** @type {State} */
function paragraphInitial(code) {
assert(
code !== codes.eof && !markdownLineEnding(code),
'expected anything other than a line ending or EOF'
)
effects.enter(types.paragraph)
return lineStart(code)
}
/** @type {State} */
function lineStart(code) {
const token = effects.enter(types.chunkText, {
contentType: constants.contentTypeText,
previous
})
if (previous) {
previous.next = token
}
previous = token
return data(code)
}
/** @type {State} */
function data(code) {
if (code === codes.eof) {
effects.exit(types.chunkText)
effects.exit(types.paragraph)
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
effects.exit(types.chunkText)
return lineStart
}
// Data.
effects.consume(code)
return data
}
}
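
Because each line's `chunkText` token above is linked to the previous one, a soft line break stays inside a single paragraph; a sketch via the public API:

import {micromark} from 'micromark'

console.log(micromark('a\nb'))
// => '<p>a\nb</p>' (both lines are chunks of the same paragraph)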

12
node_modules/micromark/dev/lib/initialize/document.d.ts generated vendored Normal file

@@ -0,0 +1,12 @@
/** @type {InitialConstruct} */
export const document: InitialConstruct
export type Construct = import('micromark-util-types').Construct
export type ContainerState = import('micromark-util-types').ContainerState
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type Point = import('micromark-util-types').Point
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type StackItem = [Construct, ContainerState]

435
node_modules/micromark/dev/lib/initialize/document.js generated vendored Normal file

@@ -0,0 +1,435 @@
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').ContainerState} ContainerState
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
*/
/**
* @typedef {[Construct, ContainerState]} StackItem
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {splice} from 'micromark-util-chunked'
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
/** @type {InitialConstruct} */
export const document = {tokenize: initializeDocument}
/** @type {Construct} */
const containerConstruct = {tokenize: tokenizeContainer}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeDocument(effects) {
const self = this
/** @type {Array<StackItem>} */
const stack = []
let continued = 0
/** @type {TokenizeContext | undefined} */
let childFlow
/** @type {Token | undefined} */
let childToken
/** @type {number} */
let lineStartOffset
return start
/** @type {State} */
function start(code) {
// First we iterate through the open blocks, starting with the root
// document, and descending through last children down to the last open
// block.
// Each block imposes a condition that the line must satisfy if the block is
// to remain open.
// For example, a block quote requires a `>` character.
// A paragraph requires a non-blank line.
// In this phase we may match all or just some of the open blocks.
// But we cannot close unmatched blocks yet, because we may have a lazy
// continuation line.
if (continued < stack.length) {
const item = stack[continued]
self.containerState = item[1]
assert(
item[0].continuation,
'expected `continuation` to be defined on container construct'
)
return effects.attempt(
item[0].continuation,
documentContinue,
checkNewContainers
)(code)
}
// Done.
return checkNewContainers(code)
}
/** @type {State} */
function documentContinue(code) {
assert(
self.containerState,
'expected `containerState` to be defined after continuation'
)
continued++
// Note: this field is called `_closeFlow` but it also closes containers.
// Perhaps a good idea to rename it but it's already used in the wild by
// extensions.
if (self.containerState._closeFlow) {
self.containerState._closeFlow = undefined
if (childFlow) {
closeFlow()
}
// Note: this algorithm for moving events around is similar to the
// algorithm when dealing with lazy lines in `writeToChild`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {Point | undefined} */
let point
// Find the flow chunk.
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === types.chunkFlow
) {
point = self.events[indexBeforeFlow][1].end
break
}
}
assert(point, 'could not find previous flow chunk')
exitContainers(continued)
// Fix positions.
let index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = Object.assign({}, point)
index++
}
// Inject the exits earlier (they're still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
return checkNewContainers(code)
}
return start(code)
}
/** @type {State} */
function checkNewContainers(code) {
// Next, after consuming the continuation markers for existing blocks, we
// look for new block starts (e.g. `>` for a block quote).
// If we encounter a new block start, we close any blocks unmatched in
// step 1 before creating the new block as a child of the last matched
// block.
if (continued === stack.length) {
// No need to `check` whether there's a container, or `exitContainers`
// would be moot.
// We can instead immediately `attempt` to parse one.
if (!childFlow) {
return documentContinued(code)
}
// If we have concrete content, such as block HTML or fenced code,
// we can't have containers "pierce" into them, so we can immediately
// start.
if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
return flowStart(code)
}
// If we do have flow, it could still be a blank line,
// but we'd be interrupting it w/ a new container if there's a current
// construct.
// To do: next major: remove `_gfmTableDynamicInterruptHack` (no longer
// needed in micromark-extension-gfm-table@1.0.6).
self.interrupt = Boolean(
childFlow.currentConstruct && !childFlow._gfmTableDynamicInterruptHack
)
}
// Check if there is a new container.
self.containerState = {}
return effects.check(
containerConstruct,
thereIsANewContainer,
thereIsNoNewContainer
)(code)
}
/** @type {State} */
function thereIsANewContainer(code) {
if (childFlow) closeFlow()
exitContainers(continued)
return documentContinued(code)
}
/** @type {State} */
function thereIsNoNewContainer(code) {
self.parser.lazy[self.now().line] = continued !== stack.length
lineStartOffset = self.now().offset
return flowStart(code)
}
/** @type {State} */
function documentContinued(code) {
// Try new containers.
self.containerState = {}
return effects.attempt(
containerConstruct,
containerContinue,
flowStart
)(code)
}
/** @type {State} */
function containerContinue(code) {
assert(
self.currentConstruct,
'expected `currentConstruct` to be defined on tokenizer'
)
assert(
self.containerState,
'expected `containerState` to be defined on tokenizer'
)
continued++
stack.push([self.currentConstruct, self.containerState])
// Try another.
return documentContinued(code)
}
/** @type {State} */
function flowStart(code) {
if (code === codes.eof) {
if (childFlow) closeFlow()
exitContainers(0)
effects.consume(code)
return
}
childFlow = childFlow || self.parser.flow(self.now())
effects.enter(types.chunkFlow, {
contentType: constants.contentTypeFlow,
previous: childToken,
_tokenizer: childFlow
})
return flowContinue(code)
}
/** @type {State} */
function flowContinue(code) {
if (code === codes.eof) {
writeToChild(effects.exit(types.chunkFlow), true)
exitContainers(0)
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
writeToChild(effects.exit(types.chunkFlow))
// Get ready for the next line.
continued = 0
self.interrupt = undefined
return start
}
effects.consume(code)
return flowContinue
}
/**
* @param {Token} token
* @param {boolean | undefined} [eof]
* @returns {void}
*/
function writeToChild(token, eof) {
assert(childFlow, 'expected `childFlow` to be defined when continuing')
const stream = self.sliceStream(token)
if (eof) stream.push(null)
token.previous = childToken
if (childToken) childToken.next = token
childToken = token
childFlow.defineSkip(token.start)
childFlow.write(stream)
// Alright, so we just added a lazy line:
//
// ```markdown
// > a
// b.
//
// Or:
//
// > ~~~c
// d
//
// Or:
//
// > | e |
// f
// ```
//
// The construct in the second example (fenced code) does not accept lazy
// lines, so it marked itself as done at the end of its first line, and
// then the content construct parses `d`.
// Most constructs in markdown match on the first line: if the first line
// forms a construct, a non-lazy line can't "unmake" it.
//
// The construct in the third example is potentially a GFM table, and
// those are *weird*.
// It *could* be a table, from the first line, if the following line
// matches a condition.
// In this case, that second line is lazy, which "unmakes" the first line
// and turns the whole into one content block.
//
// We've now parsed the non-lazy and the lazy line, and can figure out
// whether the lazy line started a new flow block.
// If it did, we exit the current containers between the two flow blocks.
if (self.parser.lazy[token.start.line]) {
let index = childFlow.events.length
while (index--) {
if (
// The token starts before the line ending…
childFlow.events[index][1].start.offset < lineStartOffset &&
// …and either is not ended yet…
(!childFlow.events[index][1].end ||
// …or ends after it.
childFlow.events[index][1].end.offset > lineStartOffset)
) {
// Exit: there's still something open, which means it's a lazy line
// part of something.
return
}
}
// Note: this algorithm for moving events around is similar to the
// algorithm when closing flow in `documentContinue`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {boolean | undefined} */
let seen
/** @type {Point | undefined} */
let point
// Find the previous chunk (the one before the lazy line).
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === types.chunkFlow
) {
if (seen) {
point = self.events[indexBeforeFlow][1].end
break
}
seen = true
}
}
assert(point, 'could not find previous flow chunk')
exitContainers(continued)
// Fix positions.
index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = Object.assign({}, point)
index++
}
// Inject the exits earlier (they're still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
}
}
/**
* @param {number} size
* @returns {void}
*/
function exitContainers(size) {
let index = stack.length
// Exit open containers.
while (index-- > size) {
const entry = stack[index]
self.containerState = entry[1]
assert(
entry[0].exit,
'expected `exit` to be defined on container construct'
)
entry[0].exit.call(self, effects)
}
stack.length = size
}
function closeFlow() {
assert(
self.containerState,
'expected `containerState` to be defined when closing flow'
)
assert(childFlow, 'expected `childFlow` to be defined when closing it')
childFlow.write([codes.eof])
childToken = undefined
childFlow = undefined
self.containerState._closeFlow = undefined
}
}
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeContainer(effects, ok, nok) {
// Always populated by defaults.
assert(
this.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
return factorySpace(
effects,
effects.attempt(this.parser.constructs.document, ok, nok),
types.linePrefix,
this.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)
}
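
The lazy-line bookkeeping above (`parser.lazy` plus the event surgery in `writeToChild`) is what implements CommonMark lazy continuation; a sketch via the public API:

import {micromark} from 'micromark'

// The lazy `b` continues the paragraph inside the block quote:
console.log(micromark('> a\nb'))
// => '<blockquote>\n<p>a\nb</p>\n</blockquote>'

// Fenced code is concrete, so the lazy `d` cannot continue it and
// instead starts a new paragraph after the quote:
console.log(micromark('> ~~~c\nd'))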

6
node_modules/micromark/dev/lib/initialize/flow.d.ts generated vendored Normal file

@@ -0,0 +1,6 @@
/** @type {InitialConstruct} */
export const flow: InitialConstruct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext

83
node_modules/micromark/dev/lib/initialize/flow.js generated vendored Normal file

@@ -0,0 +1,83 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
import {blankLine, content} from 'micromark-core-commonmark'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
/** @type {InitialConstruct} */
export const flow = {tokenize: initializeFlow}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeFlow(effects) {
const self = this
const initial = effects.attempt(
// Try to parse a blank line.
blankLine,
atBlankEnding,
// Try to parse initial flow (essentially, only code).
effects.attempt(
this.parser.constructs.flowInitial,
afterConstruct,
factorySpace(
effects,
effects.attempt(
this.parser.constructs.flow,
afterConstruct,
effects.attempt(content, afterConstruct)
),
types.linePrefix
)
)
)
return initial
/** @type {State} */
function atBlankEnding(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEndingBlank)
effects.consume(code)
effects.exit(types.lineEndingBlank)
self.currentConstruct = undefined
return initial
}
/** @type {State} */
function afterConstruct(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
self.currentConstruct = undefined
return initial
}
}

11
node_modules/micromark/dev/lib/initialize/text.d.ts generated vendored Normal file

@@ -0,0 +1,11 @@
export namespace resolver {
const resolveAll: import('micromark-util-types').Resolver
}
export const string: import('micromark-util-types').InitialConstruct
export const text: import('micromark-util-types').InitialConstruct
export type Code = import('micromark-util-types').Code
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext

232
node_modules/micromark/dev/lib/initialize/text.js generated vendored Normal file

@@ -0,0 +1,232 @@
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
export const resolver = {resolveAll: createResolver()}
export const string = initializeFactory('string')
export const text = initializeFactory('text')
/**
* @param {'string' | 'text'} field
* @returns {InitialConstruct}
*/
function initializeFactory(field) {
return {
tokenize: initializeText,
resolveAll: createResolver(
field === 'text' ? resolveAllLineSuffixes : undefined
)
}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeText(effects) {
const self = this
const constructs = this.parser.constructs[field]
const text = effects.attempt(constructs, start, notText)
return start
/** @type {State} */
function start(code) {
return atBreak(code) ? text(code) : notText(code)
}
/** @type {State} */
function notText(code) {
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.data)
effects.consume(code)
return data
}
/** @type {State} */
function data(code) {
if (atBreak(code)) {
effects.exit(types.data)
return text(code)
}
// Data.
effects.consume(code)
return data
}
/**
* @param {Code} code
* @returns {boolean}
*/
function atBreak(code) {
if (code === codes.eof) {
return true
}
const list = constructs[code]
let index = -1
if (list) {
// Always populated by defaults.
assert(Array.isArray(list), 'expected `disable.null` to be populated')
while (++index < list.length) {
const item = list[index]
if (!item.previous || item.previous.call(self, self.previous)) {
return true
}
}
}
return false
}
}
}
/**
* @param {Resolver | undefined} [extraResolver]
* @returns {Resolver}
*/
function createResolver(extraResolver) {
return resolveAllText
/** @type {Resolver} */
function resolveAllText(events, context) {
let index = -1
/** @type {number | undefined} */
let enter
// A rather boring computation (to merge adjacent `data` events) which
// improves mm performance by 29%.
while (++index <= events.length) {
if (enter === undefined) {
if (events[index] && events[index][1].type === types.data) {
enter = index
index++
}
} else if (!events[index] || events[index][1].type !== types.data) {
// Don't do anything if there is one data token.
if (index !== enter + 2) {
events[enter][1].end = events[index - 1][1].end
events.splice(enter + 2, index - enter - 2)
index = enter + 2
}
enter = undefined
}
}
return extraResolver ? extraResolver(events, context) : events
}
}
/**
* A rather ugly set of instructions which again looks at chunks in the input
* stream.
* The reason to do this here is that it is *much* faster to parse in reverse.
* And that we can't hook into `null` to split the line suffix before an EOF.
* To do: figure out if we can make this into a clean utility, or even in core.
* As it will be useful for GFM's literal autolink extension (and maybe even
* tables?)
*
* @type {Resolver}
*/
function resolveAllLineSuffixes(events, context) {
let eventIndex = 0 // Skip first.
while (++eventIndex <= events.length) {
if (
(eventIndex === events.length ||
events[eventIndex][1].type === types.lineEnding) &&
events[eventIndex - 1][1].type === types.data
) {
const data = events[eventIndex - 1][1]
const chunks = context.sliceStream(data)
let index = chunks.length
let bufferIndex = -1
let size = 0
/** @type {boolean | undefined} */
let tabs
while (index--) {
const chunk = chunks[index]
if (typeof chunk === 'string') {
bufferIndex = chunk.length
while (chunk.charCodeAt(bufferIndex - 1) === codes.space) {
size++
bufferIndex--
}
if (bufferIndex) break
bufferIndex = -1
}
// Number
else if (chunk === codes.horizontalTab) {
tabs = true
size++
} else if (chunk === codes.virtualSpace) {
// Empty
} else {
// Replacement character, exit.
index++
break
}
}
if (size) {
const token = {
type:
eventIndex === events.length ||
tabs ||
size < constants.hardBreakPrefixSizeMin
? types.lineSuffix
: types.hardBreakTrailing,
start: {
line: data.end.line,
column: data.end.column - size,
offset: data.end.offset - size,
_index: data.start._index + index,
_bufferIndex: index
? bufferIndex
: data.start._bufferIndex + bufferIndex
},
end: Object.assign({}, data.end)
}
data.end = Object.assign({}, token.start)
if (data.start.offset === data.end.offset) {
Object.assign(data, token)
} else {
events.splice(
eventIndex,
0,
['enter', token, context],
['exit', token, context]
)
eventIndex += 2
}
}
eventIndex++
}
}
return events
}
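
`resolveAllLineSuffixes` above is what classifies trailing whitespace, and the classification is observable in the output; a sketch via the public API:

import {micromark} from 'micromark'

// Two or more trailing spaces become a hard break (`hardBreakTrailing`):
console.log(micromark('a  \nb'))
// => '<p>a<br />\nb</p>'

// A trailing tab is only a line suffix, so no break is produced:
console.log(micromark('a\t\nb'))
// => '<p>a\nb</p>'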

11
node_modules/micromark/dev/lib/parse.d.ts generated vendored Normal file

@@ -0,0 +1,11 @@
/**
* @param {ParseOptions | null | undefined} [options]
* @returns {ParseContext}
*/
export function parse(options?: ParseOptions | null | undefined): ParseContext
export type Create = import('micromark-util-types').Create
export type FullNormalizedExtension =
import('micromark-util-types').FullNormalizedExtension
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type ParseContext = import('micromark-util-types').ParseContext
export type ParseOptions = import('micromark-util-types').ParseOptions

51
node_modules/micromark/dev/lib/parse.js generated vendored Normal file

@@ -0,0 +1,51 @@
/**
* @typedef {import('micromark-util-types').Create} Create
* @typedef {import('micromark-util-types').FullNormalizedExtension} FullNormalizedExtension
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').ParseContext} ParseContext
* @typedef {import('micromark-util-types').ParseOptions} ParseOptions
*/
import {combineExtensions} from 'micromark-util-combine-extensions'
import {content} from './initialize/content.js'
import {document} from './initialize/document.js'
import {flow} from './initialize/flow.js'
import {text, string} from './initialize/text.js'
import {createTokenizer} from './create-tokenizer.js'
import * as defaultConstructs from './constructs.js'
/**
* @param {ParseOptions | null | undefined} [options]
* @returns {ParseContext}
*/
export function parse(options) {
const settings = options || {}
const constructs = /** @type {FullNormalizedExtension} */ (
combineExtensions([defaultConstructs, ...(settings.extensions || [])])
)
/** @type {ParseContext} */
const parser = {
defined: [],
lazy: {},
constructs,
content: create(content),
document: create(document),
flow: create(flow),
string: create(string),
text: create(text)
}
return parser
/**
* @param {InitialConstruct} initial
*/
function create(initial) {
return creator
/** @type {Create} */
function creator(from) {
return createTokenizer(parser, initial, from)
}
}
}
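
A sketch of `parse` with a syntax extension; the `gfm` import is illustrative and assumes the separate `micromark-extension-gfm` package is installed:

import {gfm} from 'micromark-extension-gfm' // assumed installed; any syntax extension works
import {parse} from './lib/parse.js'

// Extensions are merged with the default constructs via `combineExtensions`.
const parser = parse({extensions: [gfm()]})

// Each field on the parser is a factory: calling it spawns a fresh tokenizer.
const flowTokenizer = parser.flow({line: 1, column: 1, offset: 0})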

8
node_modules/micromark/dev/lib/postprocess.d.ts generated vendored Normal file

@@ -0,0 +1,8 @@
/**
* @param {Array<Event>} events
* @returns {Array<Event>}
*/
export function postprocess(
events: Array<import('micromark-util-types').Event>
): Array<import('micromark-util-types').Event>
export type Event = import('micromark-util-types').Event

17
node_modules/micromark/dev/lib/postprocess.js generated vendored Normal file

@@ -0,0 +1,17 @@
/**
* @typedef {import('micromark-util-types').Event} Event
*/
import {subtokenize} from 'micromark-util-subtokenize'
/**
* @param {Array<Event>} events
* @returns {Array<Event>}
*/
export function postprocess(events) {
while (!subtokenize(events)) {
// Empty
}
return events
}

13
node_modules/micromark/dev/lib/preprocess.d.ts generated vendored Normal file

@@ -0,0 +1,13 @@
/**
* @returns {Preprocessor}
*/
export function preprocess(): Preprocessor
export type Chunk = import('micromark-util-types').Chunk
export type Code = import('micromark-util-types').Code
export type Encoding = import('micromark-util-types').Encoding
export type Value = import('micromark-util-types').Value
export type Preprocessor = (
value: Value,
encoding?: Encoding | null | undefined,
end?: boolean | null | undefined
) => Array<Chunk>

134
node_modules/micromark/dev/lib/preprocess.js generated vendored Normal file

@@ -0,0 +1,134 @@
/**
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Encoding} Encoding
* @typedef {import('micromark-util-types').Value} Value
*/
/**
* @callback Preprocessor
* @param {Value} value
* @param {Encoding | null | undefined} [encoding]
* @param {boolean | null | undefined} [end=false]
* @returns {Array<Chunk>}
*/
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
const search = /[\0\t\n\r]/g
/**
* @returns {Preprocessor}
*/
export function preprocess() {
let column = 1
let buffer = ''
/** @type {boolean | undefined} */
let start = true
/** @type {boolean | undefined} */
let atCarriageReturn
return preprocessor
/** @type {Preprocessor} */
function preprocessor(value, encoding, end) {
/** @type {Array<Chunk>} */
const chunks = []
/** @type {RegExpMatchArray | null} */
let match
/** @type {number} */
let next
/** @type {number} */
let startPosition
/** @type {number} */
let endPosition
/** @type {Code} */
let code
// @ts-expect-error `Buffer` does allow an encoding.
value = buffer + value.toString(encoding)
startPosition = 0
buffer = ''
if (start) {
// To do: `markdown-rs` actually parses BOMs (byte order marks).
if (value.charCodeAt(0) === codes.byteOrderMarker) {
startPosition++
}
start = undefined
}
while (startPosition < value.length) {
search.lastIndex = startPosition
match = search.exec(value)
endPosition =
match && match.index !== undefined ? match.index : value.length
code = value.charCodeAt(endPosition)
if (!match) {
buffer = value.slice(startPosition)
break
}
if (
code === codes.lf &&
startPosition === endPosition &&
atCarriageReturn
) {
chunks.push(codes.carriageReturnLineFeed)
atCarriageReturn = undefined
} else {
if (atCarriageReturn) {
chunks.push(codes.carriageReturn)
atCarriageReturn = undefined
}
if (startPosition < endPosition) {
chunks.push(value.slice(startPosition, endPosition))
column += endPosition - startPosition
}
switch (code) {
case codes.nul: {
chunks.push(codes.replacementCharacter)
column++
break
}
case codes.ht: {
next = Math.ceil(column / constants.tabSize) * constants.tabSize
chunks.push(codes.horizontalTab)
while (column++ < next) chunks.push(codes.virtualSpace)
break
}
case codes.lf: {
chunks.push(codes.lineFeed)
column = 1
break
}
default: {
atCarriageReturn = true
column = 1
}
}
}
startPosition = endPosition + 1
}
if (end) {
if (atCarriageReturn) chunks.push(codes.carriageReturn)
if (buffer) chunks.push(buffer)
chunks.push(codes.eof)
}
return chunks
}
}
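
A sketch of what the preprocessor emits (import path relative to this `dev` folder): a tab becomes `-2` followed by `-1` virtual spaces out to the next tab stop, CRLF collapses to `-3`, and `null` marks the end, per the virtual codes in `micromark-util-symbol`:

import {preprocess} from './lib/preprocess.js'

console.log(preprocess()('a\tb\r\n', undefined, true))
// => ['a', -2, -1, -1, 'b', -3, null]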

34
node_modules/micromark/dev/stream.d.ts generated vendored Normal file

@@ -0,0 +1,34 @@
/**
* Create a duplex (readable and writable) stream.
*
* Some of the work to parse markdown can be done streaming, but in the
* end buffering is required.
*
* micromark does not handle errors for you, so you must handle errors on whatever
* streams you pipe into it.
* As markdown does not know errors, `micromark` itself does not emit errors.
*
* @param {Options | null | undefined} [options]
* Configuration (optional).
* @returns {MinimalDuplex}
* Duplex stream.
*/
export function stream(options?: Options | null | undefined): MinimalDuplex
export type Options = import('micromark-util-types').Options
export type Value = import('micromark-util-types').Value
export type Encoding = import('micromark-util-types').Encoding
/**
* Function called when write was successful.
*/
export type Callback = () => void
export type MinimalDuplex = Omit<
NodeJS.ReadableStream & NodeJS.WritableStream,
| 'isPaused'
| 'pause'
| 'read'
| 'resume'
| 'setEncoding'
| 'unpipe'
| 'unshift'
| 'wrap'
>

252
node_modules/micromark/dev/stream.js generated vendored Normal file

@@ -0,0 +1,252 @@
/**
* @typedef {import('micromark-util-types').Options} Options
* @typedef {import('micromark-util-types').Value} Value
* @typedef {import('micromark-util-types').Encoding} Encoding
*/
/**
* @callback Callback
* Function called when write was successful.
* @returns {void}
* Nothing.
*
* @typedef {Omit<NodeJS.ReadableStream & NodeJS.WritableStream, 'isPaused' | 'pause' | 'read' | 'resume' | 'setEncoding' | 'unpipe' | 'unshift' | 'wrap'>} MinimalDuplex
*/
import {EventEmitter} from 'events'
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
/**
* Create a duplex (readable and writable) stream.
*
* Some of the work to parse markdown can be done streaming, but in the
* end buffering is required.
*
* micromark does not handle errors for you, so you must handle errors on whatever
* streams you pipe into it.
* As markdown does not know errors, `micromark` itself does not emit errors.
*
* @param {Options | null | undefined} [options]
* Configuration (optional).
* @returns {MinimalDuplex}
* Duplex stream.
*/
export function stream(options) {
const prep = preprocess()
const tokenize = parse(options).document().write
const comp = compile(options)
/** @type {boolean} */
let ended
/** @type {MinimalDuplex} */
// @ts-expect-error `addListener` is fine.
const emitter = Object.assign(new EventEmitter(), {
end,
pipe,
readable: true,
writable: true,
write
})
return emitter
/**
* Write a chunk into memory.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Encoding | null | undefined} [encoding]
   * Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | Encoding | null | undefined} [encoding]
   * Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*/
function write(chunk, encoding, callback) {
if (typeof encoding === 'function') {
callback = encoding
encoding = undefined
}
if (ended) {
throw new Error('Did not expect `write` after `end`')
}
tokenize(prep(chunk || '', encoding))
if (callback) {
callback()
}
// Signal successful write.
return true
}
/**
* End the writing.
*
* Passes all arguments as a final `write`.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Encoding | null | undefined} [encoding]
   * Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
*
* @param {Callback | Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | Encoding | null | undefined} [encoding]
   * Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*/
function end(chunk, encoding, callback) {
if (typeof chunk === 'function') {
encoding = chunk
chunk = undefined
}
if (typeof encoding === 'function') {
callback = encoding
encoding = undefined
}
write(chunk, encoding, callback)
emitter.emit('data', comp(postprocess(tokenize(prep('', encoding, true)))))
emitter.emit('end')
ended = true
return true
}
/**
* Pipe the processor into a writable stream.
*
* Basically `Stream#pipe`, but inlined and simplified to keep the bundled
* size down.
* See: <https://github.com/nodejs/node/blob/43a5170/lib/internal/streams/legacy.js#L13>.
*
* @template {NodeJS.WritableStream} Stream
* @param {Stream} dest
* @param {{end?: boolean | null | undefined}} [options]
* @returns {Stream}
*/
function pipe(dest, options) {
emitter.on('data', ondata)
emitter.on('error', onerror)
emitter.on('end', cleanup)
emitter.on('close', cleanup)
// If the `end` option is not supplied, `dest.end()` will be
// called when the `end` or `close` events are received.
// @ts-expect-error `_isStdio` is available on `std{err,out}`
if (!dest._isStdio && (!options || options.end !== false)) {
emitter.on('end', onend)
}
dest.on('error', onerror)
dest.on('close', cleanup)
dest.emit('pipe', emitter)
return dest
/**
* End destination stream.
*
* @returns {void}
*/
function onend() {
if (dest.end) {
dest.end()
}
}
/**
* Handle data.
*
* @param {string} chunk
* @returns {void}
*/
function ondata(chunk) {
if (dest.writable) {
dest.write(chunk)
}
}
/**
* Clean listeners.
*
* @returns {void}
*/
function cleanup() {
emitter.removeListener('data', ondata)
emitter.removeListener('end', onend)
emitter.removeListener('error', onerror)
emitter.removeListener('end', cleanup)
emitter.removeListener('close', cleanup)
dest.removeListener('error', onerror)
dest.removeListener('close', cleanup)
}
/**
* Close dangling pipes and handle unheard errors.
*
* @param {Error | null | undefined} [error]
* @returns {void}
*/
function onerror(error) {
cleanup()
if (!emitter.listenerCount('error')) {
throw error // Unhandled stream error in pipe.
}
}
}
}
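
A minimal usage sketch for the duplex stream above, matching the documented `micromark/stream` entry point: markdown in, HTML out. Error handling stays with the caller, per the doc comment.

```js
// Sketch: stream markdown from stdin through micromark and write HTML to stdout.
import {stream} from 'micromark/stream'

process.stdin.pipe(stream()).pipe(process.stdout)
```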

12
node_modules/micromark/index.d.ts generated vendored Normal file
View file

@ -0,0 +1,12 @@
export function micromark(
value: Value,
encoding: Encoding | null | undefined,
options?: Options | null | undefined
): string
export function micromark(
value: Value,
options?: Options | null | undefined
): string
export type Encoding = import('micromark-util-types').Encoding
export type Options = import('micromark-util-types').Options
export type Value = import('micromark-util-types').Value

54
node_modules/micromark/index.js generated vendored Normal file
View file

@ -0,0 +1,54 @@
/**
* @typedef {import('micromark-util-types').Encoding} Encoding
* @typedef {import('micromark-util-types').Options} Options
* @typedef {import('micromark-util-types').Value} Value
*/
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
/**
* Compile markdown to HTML.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Buffer`).
* @param {Encoding | null | undefined} encoding
 * Character encoding to understand `value` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Buffer`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @param {Value} value
* Markdown to parse (`string` or `Buffer`).
* @param {Options | Encoding | null | undefined} [encoding]
 * Character encoding to understand `value` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*/
export function micromark(value, encoding, options) {
if (typeof encoding !== 'string') {
options = encoding
encoding = undefined
}
return compile(options)(
postprocess(
parse(options).document().write(preprocess()(value, encoding, true))
)
)
}
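
For reference, a minimal call of the function above (the example from the package readme):

```js
import {micromark} from 'micromark'

console.log(micromark('## Hello, *world*!'))
// → '<h2>Hello, <em>world</em>!</h2>'
```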

24
node_modules/micromark/lib/compile.d.ts generated vendored Normal file
View file

@ -0,0 +1,24 @@
/**
* @param {CompileOptions | null | undefined} [options]
* @returns {Compile}
*/
export function compile(options?: CompileOptions | null | undefined): Compile
export type Compile = import('micromark-util-types').Compile
export type CompileContext = import('micromark-util-types').CompileContext
export type CompileData = import('micromark-util-types').CompileData
export type CompileOptions = import('micromark-util-types').CompileOptions
export type Definition = import('micromark-util-types').Definition
export type Event = import('micromark-util-types').Event
export type Handle = import('micromark-util-types').Handle
export type HtmlExtension = import('micromark-util-types').HtmlExtension
export type NormalizedHtmlExtension =
import('micromark-util-types').NormalizedHtmlExtension
export type Token = import('micromark-util-types').Token
export type Media = {
image?: boolean | undefined
labelId?: string | undefined
label?: string | undefined
referenceId?: string | undefined
destination?: string | undefined
title?: string | undefined
}

1045
node_modules/micromark/lib/compile.js generated vendored Normal file

File diff suppressed because it is too large Load diff

76
node_modules/micromark/lib/constructs.d.ts generated vendored Normal file
View file

@ -0,0 +1,76 @@
/** @satisfies {Extension['document']} */
export const document: {
42: import('micromark-util-types').Construct
43: import('micromark-util-types').Construct
45: import('micromark-util-types').Construct
48: import('micromark-util-types').Construct
49: import('micromark-util-types').Construct
50: import('micromark-util-types').Construct
51: import('micromark-util-types').Construct
52: import('micromark-util-types').Construct
53: import('micromark-util-types').Construct
54: import('micromark-util-types').Construct
55: import('micromark-util-types').Construct
56: import('micromark-util-types').Construct
57: import('micromark-util-types').Construct
62: import('micromark-util-types').Construct
}
/** @satisfies {Extension['contentInitial']} */
export const contentInitial: {
91: import('micromark-util-types').Construct
}
/** @satisfies {Extension['flowInitial']} */
export const flowInitial: {
[-2]: import('micromark-util-types').Construct
[-1]: import('micromark-util-types').Construct
32: import('micromark-util-types').Construct
}
/** @satisfies {Extension['flow']} */
export const flow: {
35: import('micromark-util-types').Construct
42: import('micromark-util-types').Construct
45: import('micromark-util-types').Construct[]
60: import('micromark-util-types').Construct
61: import('micromark-util-types').Construct
95: import('micromark-util-types').Construct
96: import('micromark-util-types').Construct
126: import('micromark-util-types').Construct
}
/** @satisfies {Extension['string']} */
export const string: {
38: import('micromark-util-types').Construct
92: import('micromark-util-types').Construct
}
/** @satisfies {Extension['text']} */
export const text: {
[-5]: import('micromark-util-types').Construct
[-4]: import('micromark-util-types').Construct
[-3]: import('micromark-util-types').Construct
33: import('micromark-util-types').Construct
38: import('micromark-util-types').Construct
42: import('micromark-util-types').Construct
60: import('micromark-util-types').Construct[]
91: import('micromark-util-types').Construct
92: import('micromark-util-types').Construct[]
93: import('micromark-util-types').Construct
95: import('micromark-util-types').Construct
96: import('micromark-util-types').Construct
}
export namespace insideSpan {
const _null: (
| import('micromark-util-types').Construct
| {
resolveAll: import('micromark-util-types').Resolver
}
)[]
export {_null as null}
}
export namespace attentionMarkers {
const _null_1: (42 | 95)[]
export {_null_1 as null}
}
export namespace disable {
const _null_2: never[]
export {_null_2 as null}
}
export type Extension = import('micromark-util-types').Extension

106
node_modules/micromark/lib/constructs.js generated vendored Normal file
View file

@ -0,0 +1,106 @@
/**
* @typedef {import('micromark-util-types').Extension} Extension
*/
import {
attention,
autolink,
blockQuote,
characterEscape,
characterReference,
codeFenced,
codeIndented,
codeText,
definition,
hardBreakEscape,
headingAtx,
htmlFlow,
htmlText,
labelEnd,
labelStartImage,
labelStartLink,
lineEnding,
list,
setextUnderline,
thematicBreak
} from 'micromark-core-commonmark'
import {resolver as resolveText} from './initialize/text.js'
/** @satisfies {Extension['document']} */
export const document = {
[42]: list,
[43]: list,
[45]: list,
[48]: list,
[49]: list,
[50]: list,
[51]: list,
[52]: list,
[53]: list,
[54]: list,
[55]: list,
[56]: list,
[57]: list,
[62]: blockQuote
}
/** @satisfies {Extension['contentInitial']} */
export const contentInitial = {
[91]: definition
}
/** @satisfies {Extension['flowInitial']} */
export const flowInitial = {
[-2]: codeIndented,
[-1]: codeIndented,
[32]: codeIndented
}
/** @satisfies {Extension['flow']} */
export const flow = {
[35]: headingAtx,
[42]: thematicBreak,
[45]: [setextUnderline, thematicBreak],
[60]: htmlFlow,
[61]: setextUnderline,
[95]: thematicBreak,
[96]: codeFenced,
[126]: codeFenced
}
/** @satisfies {Extension['string']} */
export const string = {
[38]: characterReference,
[92]: characterEscape
}
/** @satisfies {Extension['text']} */
export const text = {
[-5]: lineEnding,
[-4]: lineEnding,
[-3]: lineEnding,
[33]: labelStartImage,
[38]: characterReference,
[42]: attention,
[60]: [autolink, htmlText],
[91]: labelStartLink,
[92]: [hardBreakEscape, characterEscape],
[93]: labelEnd,
[95]: attention,
[96]: codeText
}
/** @satisfies {Extension['insideSpan']} */
export const insideSpan = {
null: [attention, resolveText]
}
/** @satisfies {Extension['attentionMarkers']} */
export const attentionMarkers = {
null: [42, 95]
}
/** @satisfies {Extension['disable']} */
export const disable = {
null: []
}
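
The numeric keys above are character codes: the tokenizer tries a construct when it reaches that code in the matching content type. A sketch of the mapping:

```js
// 42 '*' → list (document), thematicBreak (flow), attention (text)
// 62 '>' → blockQuote (document)
// 35 '#' → headingAtx (flow)
// 96 '`' → codeFenced (flow), codeText (text)
console.log(['*', '>', '#', '`'].map((c) => c.charCodeAt(0))) // [42, 62, 35, 96]
```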

40
node_modules/micromark/lib/create-tokenizer.d.ts generated vendored Normal file
View file

@ -0,0 +1,40 @@
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
 * function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* @param {InitialConstruct} initialize
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* @returns {TokenizeContext}
*/
export function createTokenizer(
parser: ParseContext,
initialize: InitialConstruct,
from?: Omit<Point, '_bufferIndex' | '_index'> | undefined
): TokenizeContext
export type Chunk = import('micromark-util-types').Chunk
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type ConstructRecord = import('micromark-util-types').ConstructRecord
export type Effects = import('micromark-util-types').Effects
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type ParseContext = import('micromark-util-types').ParseContext
export type Point = import('micromark-util-types').Point
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenType = import('micromark-util-types').TokenType
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Restore = () => void
export type Info = {
restore: Restore
from: number
}
/**
* Handle a successful run.
*/
export type ReturnHandle = (construct: Construct, info: Info) => void

582
node_modules/micromark/lib/create-tokenizer.js generated vendored Normal file
View file

@ -0,0 +1,582 @@
/**
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').ConstructRecord} ConstructRecord
* @typedef {import('micromark-util-types').Effects} Effects
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').ParseContext} ParseContext
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenType} TokenType
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
/**
* @callback Restore
* @returns {void}
*
* @typedef Info
* @property {Restore} restore
* @property {number} from
*
* @callback ReturnHandle
* Handle a successful run.
* @param {Construct} construct
* @param {Info} info
* @returns {void}
*/
import {markdownLineEnding} from 'micromark-util-character'
import {push, splice} from 'micromark-util-chunked'
import {resolveAll} from 'micromark-util-resolve-all'
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
 * function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* @param {InitialConstruct} initialize
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* @returns {TokenizeContext}
*/
export function createTokenizer(parser, initialize, from) {
/** @type {Point} */
let point = Object.assign(
from
? Object.assign({}, from)
: {
line: 1,
column: 1,
offset: 0
},
{
_index: 0,
_bufferIndex: -1
}
)
/** @type {Record<string, number>} */
const columnStart = {}
/** @type {Array<Construct>} */
const resolveAllConstructs = []
/** @type {Array<Chunk>} */
let chunks = []
/** @type {Array<Token>} */
let stack = []
/** @type {boolean | undefined} */
let consumed = true
/**
* Tools used for tokenizing.
*
* @type {Effects}
*/
const effects = {
consume,
enter,
exit,
attempt: constructFactory(onsuccessfulconstruct),
check: constructFactory(onsuccessfulcheck),
interrupt: constructFactory(onsuccessfulcheck, {
interrupt: true
})
}
/**
* State and tools for resolving and serializing.
*
* @type {TokenizeContext}
*/
const context = {
previous: null,
code: null,
containerState: {},
events: [],
parser,
sliceStream,
sliceSerialize,
now,
defineSkip,
write
}
/**
* The state function.
*
* @type {State | void}
*/
let state = initialize.tokenize.call(context, effects)
/**
* Track which character we expect to be consumed, to catch bugs.
*
* @type {Code}
*/
let expectedCode
if (initialize.resolveAll) {
resolveAllConstructs.push(initialize)
}
return context
/** @type {TokenizeContext['write']} */
function write(slice) {
chunks = push(chunks, slice)
main()
    // Exit if we're not done, resolve might change stuff.
if (chunks[chunks.length - 1] !== null) {
return []
}
addResult(initialize, 0)
// Otherwise, resolve, and exit.
context.events = resolveAll(resolveAllConstructs, context.events, context)
return context.events
}
//
// Tools.
//
/** @type {TokenizeContext['sliceSerialize']} */
function sliceSerialize(token, expandTabs) {
return serializeChunks(sliceStream(token), expandTabs)
}
/** @type {TokenizeContext['sliceStream']} */
function sliceStream(token) {
return sliceChunks(chunks, token)
}
/** @type {TokenizeContext['now']} */
function now() {
// This is a hot path, so we clone manually instead of `Object.assign({}, point)`
const {line, column, offset, _index, _bufferIndex} = point
return {
line,
column,
offset,
_index,
_bufferIndex
}
}
/** @type {TokenizeContext['defineSkip']} */
function defineSkip(value) {
columnStart[value.line] = value.column
accountForPotentialSkip()
}
//
// State management.
//
/**
* Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
* `consume`).
* Here is where we walk through the chunks, which either include strings of
* several characters, or numerical character codes.
* The reason to do this in a loop instead of a call is so the stack can
* drain.
*
* @returns {void}
*/
function main() {
/** @type {number} */
let chunkIndex
while (point._index < chunks.length) {
const chunk = chunks[point._index]
      // If we're in a buffer chunk, loop through it.
if (typeof chunk === 'string') {
chunkIndex = point._index
if (point._bufferIndex < 0) {
point._bufferIndex = 0
}
while (
point._index === chunkIndex &&
point._bufferIndex < chunk.length
) {
go(chunk.charCodeAt(point._bufferIndex))
}
} else {
go(chunk)
}
}
}
/**
* Deal with one code.
*
* @param {Code} code
* @returns {void}
*/
function go(code) {
consumed = undefined
expectedCode = code
state = state(code)
}
/** @type {Effects['consume']} */
function consume(code) {
if (markdownLineEnding(code)) {
point.line++
point.column = 1
point.offset += code === -3 ? 2 : 1
accountForPotentialSkip()
} else if (code !== -1) {
point.column++
point.offset++
}
// Not in a string chunk.
if (point._bufferIndex < 0) {
point._index++
} else {
point._bufferIndex++
// At end of string chunk.
// @ts-expect-error Points w/ non-negative `_bufferIndex` reference
// strings.
if (point._bufferIndex === chunks[point._index].length) {
point._bufferIndex = -1
point._index++
}
}
// Expose the previous character.
context.previous = code
// Mark as consumed.
consumed = true
}
/** @type {Effects['enter']} */
function enter(type, fields) {
/** @type {Token} */
// @ts-expect-error Patch instead of assign required fields to help GC.
const token = fields || {}
token.type = type
token.start = now()
context.events.push(['enter', token, context])
stack.push(token)
return token
}
/** @type {Effects['exit']} */
function exit(type) {
const token = stack.pop()
token.end = now()
context.events.push(['exit', token, context])
return token
}
/**
* Use results.
*
* @type {ReturnHandle}
*/
function onsuccessfulconstruct(construct, info) {
addResult(construct, info.from)
}
/**
* Discard results.
*
* @type {ReturnHandle}
*/
function onsuccessfulcheck(_, info) {
info.restore()
}
/**
* Factory to attempt/check/interrupt.
*
* @param {ReturnHandle} onreturn
* @param {{interrupt?: boolean | undefined} | undefined} [fields]
*/
function constructFactory(onreturn, fields) {
return hook
/**
* Handle either an object mapping codes to constructs, a list of
* constructs, or a single construct.
*
* @param {Array<Construct> | Construct | ConstructRecord} constructs
* @param {State} returnState
* @param {State | undefined} [bogusState]
* @returns {State}
*/
function hook(constructs, returnState, bogusState) {
/** @type {Array<Construct>} */
let listOfConstructs
/** @type {number} */
let constructIndex
/** @type {Construct} */
let currentConstruct
/** @type {Info} */
let info
return Array.isArray(constructs) /* c8 ignore next 1 */
? handleListOfConstructs(constructs)
: 'tokenize' in constructs
? // @ts-expect-error Looks like a construct.
handleListOfConstructs([constructs])
: handleMapOfConstructs(constructs)
/**
       * Handle a map of constructs.
*
* @param {ConstructRecord} map
* @returns {State}
*/
function handleMapOfConstructs(map) {
return start
/** @type {State} */
function start(code) {
const def = code !== null && map[code]
const all = code !== null && map.null
const list = [
// To do: add more extension tests.
/* c8 ignore next 2 */
...(Array.isArray(def) ? def : def ? [def] : []),
...(Array.isArray(all) ? all : all ? [all] : [])
]
return handleListOfConstructs(list)(code)
}
}
/**
       * Handle a list of constructs.
*
* @param {Array<Construct>} list
* @returns {State}
*/
function handleListOfConstructs(list) {
listOfConstructs = list
constructIndex = 0
if (list.length === 0) {
return bogusState
}
return handleConstruct(list[constructIndex])
}
/**
* Handle a single construct.
*
* @param {Construct} construct
* @returns {State}
*/
function handleConstruct(construct) {
return start
/** @type {State} */
function start(code) {
// To do: not needed to store if there is no bogus state, probably?
          // Currently doesn't work because `inspect` in document does a check
          // w/o a bogus, which doesn't make sense. But it does seem to help perf
// by not storing.
info = store()
currentConstruct = construct
if (!construct.partial) {
context.currentConstruct = construct
}
// Always populated by defaults.
if (
construct.name &&
context.parser.constructs.disable.null.includes(construct.name)
) {
return nok(code)
}
return construct.tokenize.call(
// If we do have fields, create an object w/ `context` as its
// prototype.
// This allows a “live binding”, which is needed for `interrupt`.
fields ? Object.assign(Object.create(context), fields) : context,
effects,
ok,
nok
)(code)
}
}
/** @type {State} */
function ok(code) {
consumed = true
onreturn(currentConstruct, info)
return returnState
}
/** @type {State} */
function nok(code) {
consumed = true
info.restore()
if (++constructIndex < listOfConstructs.length) {
return handleConstruct(listOfConstructs[constructIndex])
}
return bogusState
}
}
}
/**
* @param {Construct} construct
* @param {number} from
* @returns {void}
*/
function addResult(construct, from) {
if (construct.resolveAll && !resolveAllConstructs.includes(construct)) {
resolveAllConstructs.push(construct)
}
if (construct.resolve) {
splice(
context.events,
from,
context.events.length - from,
construct.resolve(context.events.slice(from), context)
)
}
if (construct.resolveTo) {
context.events = construct.resolveTo(context.events, context)
}
}
/**
* Store state.
*
* @returns {Info}
*/
function store() {
const startPoint = now()
const startPrevious = context.previous
const startCurrentConstruct = context.currentConstruct
const startEventsIndex = context.events.length
const startStack = Array.from(stack)
return {
restore,
from: startEventsIndex
}
/**
* Restore state.
*
* @returns {void}
*/
function restore() {
point = startPoint
context.previous = startPrevious
context.currentConstruct = startCurrentConstruct
context.events.length = startEventsIndex
stack = startStack
accountForPotentialSkip()
}
}
/**
* Move the current point a bit forward in the line when its on a column
* skip.
*
* @returns {void}
*/
function accountForPotentialSkip() {
if (point.line in columnStart && point.column < 2) {
point.column = columnStart[point.line]
point.offset += columnStart[point.line] - 1
}
}
}
/**
* Get the chunks from a slice of chunks in the range of a token.
*
* @param {Array<Chunk>} chunks
* @param {Pick<Token, 'end' | 'start'>} token
* @returns {Array<Chunk>}
*/
function sliceChunks(chunks, token) {
const startIndex = token.start._index
const startBufferIndex = token.start._bufferIndex
const endIndex = token.end._index
const endBufferIndex = token.end._bufferIndex
/** @type {Array<Chunk>} */
let view
if (startIndex === endIndex) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
} else {
view = chunks.slice(startIndex, endIndex)
if (startBufferIndex > -1) {
const head = view[0]
if (typeof head === 'string') {
view[0] = head.slice(startBufferIndex)
} else {
view.shift()
}
}
if (endBufferIndex > 0) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view.push(chunks[endIndex].slice(0, endBufferIndex))
}
}
return view
}
/**
* Get the string value of a slice of chunks.
*
* @param {Array<Chunk>} chunks
* @param {boolean | undefined} [expandTabs=false]
* @returns {string}
*/
function serializeChunks(chunks, expandTabs) {
let index = -1
/** @type {Array<string>} */
const result = []
/** @type {boolean | undefined} */
let atTab
while (++index < chunks.length) {
const chunk = chunks[index]
/** @type {string} */
let value
if (typeof chunk === 'string') {
value = chunk
} else
switch (chunk) {
case -5: {
value = '\r'
break
}
case -4: {
value = '\n'
break
}
case -3: {
value = '\r' + '\n'
break
}
case -2: {
value = expandTabs ? ' ' : '\t'
break
}
case -1: {
if (!expandTabs && atTab) continue
value = ' '
break
}
default: {
// Currently only replacement character.
value = String.fromCharCode(chunk)
}
}
atTab = chunk === -2
result.push(value)
}
return result.join('')
}
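
To make `serializeChunks` concrete: it is module-private, so the calls below are only a traced sketch, not a public API. The chunks are what the preprocessor emits for `'a\tb'` at column 1 (tab stop 4, so one tab plus two virtual spaces):

```js
const chunks = ['a', -2, -1, -1, 'b'] // 'a', tab, two virtual spaces, 'b'
// serializeChunks(chunks, false) → 'a\tb'  (virtual spaces after a tab skipped)
// serializeChunks(chunks, true)  → 'a   b' (tab padded to the next tab stop)
```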

5
node_modules/micromark/lib/example.d.ts generated vendored Normal file
View file

@ -0,0 +1,5 @@
export default function myRemarkPluginAddingComp(
this: import('unified').Processor<void, import('mdast').Root, void, void>
):
| void
| import('unified').Transformer<import('mdast').Root, import('mdast').Root>

7
node_modules/micromark/lib/initialize/content.d.ts generated vendored Normal file
View file

@ -0,0 +1,7 @@
/** @type {InitialConstruct} */
export const content: InitialConstruct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext

79
node_modules/micromark/lib/initialize/content.js generated vendored Normal file
View file

@ -0,0 +1,79 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
/** @type {InitialConstruct} */
export const content = {
tokenize: initializeContent
}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeContent(effects) {
const contentStart = effects.attempt(
this.parser.constructs.contentInitial,
afterContentStartConstruct,
paragraphInitial
)
/** @type {Token} */
let previous
return contentStart
/** @type {State} */
function afterContentStartConstruct(code) {
if (code === null) {
effects.consume(code)
return
}
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return factorySpace(effects, contentStart, 'linePrefix')
}
/** @type {State} */
function paragraphInitial(code) {
effects.enter('paragraph')
return lineStart(code)
}
/** @type {State} */
function lineStart(code) {
const token = effects.enter('chunkText', {
contentType: 'text',
previous
})
if (previous) {
previous.next = token
}
previous = token
return data(code)
}
/** @type {State} */
function data(code) {
if (code === null) {
effects.exit('chunkText')
effects.exit('paragraph')
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
effects.exit('chunkText')
return lineStart
}
// Data.
effects.consume(code)
return data
}
}
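
Each line of the paragraph becomes its own `chunkText` token, linked through `previous`/`next` so the text tokenizer can later flow across line endings. A sketch of the observable result:

```js
import {micromark} from 'micromark'

micromark('a\nb') // → '<p>a\nb</p>': one paragraph, two linked text chunks
```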

12
node_modules/micromark/lib/initialize/document.d.ts generated vendored Normal file
View file

@ -0,0 +1,12 @@
/** @type {InitialConstruct} */
export const document: InitialConstruct
export type Construct = import('micromark-util-types').Construct
export type ContainerState = import('micromark-util-types').ContainerState
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type Point = import('micromark-util-types').Point
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type StackItem = [Construct, ContainerState]

382
node_modules/micromark/lib/initialize/document.js generated vendored Normal file
View file

@ -0,0 +1,382 @@
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').ContainerState} ContainerState
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
*/
/**
* @typedef {[Construct, ContainerState]} StackItem
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {splice} from 'micromark-util-chunked'
/** @type {InitialConstruct} */
export const document = {
tokenize: initializeDocument
}
/** @type {Construct} */
const containerConstruct = {
tokenize: tokenizeContainer
}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeDocument(effects) {
const self = this
/** @type {Array<StackItem>} */
const stack = []
let continued = 0
/** @type {TokenizeContext | undefined} */
let childFlow
/** @type {Token | undefined} */
let childToken
/** @type {number} */
let lineStartOffset
return start
/** @type {State} */
function start(code) {
// First we iterate through the open blocks, starting with the root
// document, and descending through last children down to the last open
// block.
// Each block imposes a condition that the line must satisfy if the block is
// to remain open.
// For example, a block quote requires a `>` character.
// A paragraph requires a non-blank line.
// In this phase we may match all or just some of the open blocks.
// But we cannot close unmatched blocks yet, because we may have a lazy
// continuation line.
if (continued < stack.length) {
const item = stack[continued]
self.containerState = item[1]
return effects.attempt(
item[0].continuation,
documentContinue,
checkNewContainers
)(code)
}
// Done.
return checkNewContainers(code)
}
/** @type {State} */
function documentContinue(code) {
continued++
// Note: this field is called `_closeFlow` but it also closes containers.
    // Perhaps a good idea to rename it but it's already used in the wild by
// extensions.
if (self.containerState._closeFlow) {
self.containerState._closeFlow = undefined
if (childFlow) {
closeFlow()
}
// Note: this algorithm for moving events around is similar to the
// algorithm when dealing with lazy lines in `writeToChild`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {Point | undefined} */
let point
// Find the flow chunk.
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === 'chunkFlow'
) {
point = self.events[indexBeforeFlow][1].end
break
}
}
exitContainers(continued)
// Fix positions.
let index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = Object.assign({}, point)
index++
}
      // Inject the exits earlier (they're still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
return checkNewContainers(code)
}
return start(code)
}
/** @type {State} */
function checkNewContainers(code) {
// Next, after consuming the continuation markers for existing blocks, we
// look for new block starts (e.g. `>` for a block quote).
// If we encounter a new block start, we close any blocks unmatched in
// step 1 before creating the new block as a child of the last matched
// block.
if (continued === stack.length) {
      // No need to `check` whether there's a container, or `exitContainers`
// would be moot.
// We can instead immediately `attempt` to parse one.
if (!childFlow) {
return documentContinued(code)
}
// If we have concrete content, such as block HTML or fenced code,
      // we can't have containers “pierce” into them, so we can immediately
// start.
if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
return flowStart(code)
}
// If we do have flow, it could still be a blank line,
      // but we'd be interrupting it w/ a new container if there's a current
// construct.
// To do: next major: remove `_gfmTableDynamicInterruptHack` (no longer
// needed in micromark-extension-gfm-table@1.0.6).
self.interrupt = Boolean(
childFlow.currentConstruct && !childFlow._gfmTableDynamicInterruptHack
)
}
// Check if there is a new container.
self.containerState = {}
return effects.check(
containerConstruct,
thereIsANewContainer,
thereIsNoNewContainer
)(code)
}
/** @type {State} */
function thereIsANewContainer(code) {
if (childFlow) closeFlow()
exitContainers(continued)
return documentContinued(code)
}
/** @type {State} */
function thereIsNoNewContainer(code) {
self.parser.lazy[self.now().line] = continued !== stack.length
lineStartOffset = self.now().offset
return flowStart(code)
}
/** @type {State} */
function documentContinued(code) {
// Try new containers.
self.containerState = {}
return effects.attempt(
containerConstruct,
containerContinue,
flowStart
)(code)
}
/** @type {State} */
function containerContinue(code) {
continued++
stack.push([self.currentConstruct, self.containerState])
// Try another.
return documentContinued(code)
}
/** @type {State} */
function flowStart(code) {
if (code === null) {
if (childFlow) closeFlow()
exitContainers(0)
effects.consume(code)
return
}
childFlow = childFlow || self.parser.flow(self.now())
effects.enter('chunkFlow', {
contentType: 'flow',
previous: childToken,
_tokenizer: childFlow
})
return flowContinue(code)
}
/** @type {State} */
function flowContinue(code) {
if (code === null) {
writeToChild(effects.exit('chunkFlow'), true)
exitContainers(0)
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
writeToChild(effects.exit('chunkFlow'))
// Get ready for the next line.
continued = 0
self.interrupt = undefined
return start
}
effects.consume(code)
return flowContinue
}
/**
* @param {Token} token
* @param {boolean | undefined} [eof]
* @returns {void}
*/
function writeToChild(token, eof) {
const stream = self.sliceStream(token)
if (eof) stream.push(null)
token.previous = childToken
if (childToken) childToken.next = token
childToken = token
childFlow.defineSkip(token.start)
childFlow.write(stream)
// Alright, so we just added a lazy line:
//
// ```markdown
// > a
// b.
//
// Or:
//
// > ~~~c
// d
//
// Or:
//
// > | e |
// f
// ```
//
// The construct in the second example (fenced code) does not accept lazy
// lines, so it marked itself as done at the end of its first line, and
// then the content construct parses `d`.
// Most constructs in markdown match on the first line: if the first line
    // forms a construct, a non-lazy line can't “unmake” it.
//
// The construct in the third example is potentially a GFM table, and
// those are *weird*.
// It *could* be a table, from the first line, if the following line
// matches a condition.
// In this case, that second line is lazy, which “unmakes” the first line
// and turns the whole into one content block.
//
    // We've now parsed the non-lazy and the lazy line, and can figure out
// whether the lazy line started a new flow block.
// If it did, we exit the current containers between the two flow blocks.
if (self.parser.lazy[token.start.line]) {
let index = childFlow.events.length
while (index--) {
if (
// The token starts before the line ending…
childFlow.events[index][1].start.offset < lineStartOffset &&
// …and either is not ended yet…
(!childFlow.events[index][1].end ||
// …or ends after it.
childFlow.events[index][1].end.offset > lineStartOffset)
) {
          // Exit: there's still something open, which means it's a lazy line
// part of something.
return
}
}
// Note: this algorithm for moving events around is similar to the
// algorithm when closing flow in `documentContinue`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {boolean | undefined} */
let seen
/** @type {Point | undefined} */
let point
// Find the previous chunk (the one before the lazy line).
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === 'chunkFlow'
) {
if (seen) {
point = self.events[indexBeforeFlow][1].end
break
}
seen = true
}
}
exitContainers(continued)
// Fix positions.
index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = Object.assign({}, point)
index++
}
      // Inject the exits earlier (they're still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
}
}
/**
* @param {number} size
* @returns {void}
*/
function exitContainers(size) {
let index = stack.length
// Exit open containers.
while (index-- > size) {
const entry = stack[index]
self.containerState = entry[1]
entry[0].exit.call(self, effects)
}
stack.length = size
}
function closeFlow() {
childFlow.write([null])
childToken = undefined
childFlow = undefined
self.containerState._closeFlow = undefined
}
}
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeContainer(effects, ok, nok) {
// Always populated by defaults.
return factorySpace(
effects,
effects.attempt(this.parser.constructs.document, ok, nok),
'linePrefix',
this.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4
)
}
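
The lazy-line handling in `writeToChild` above is easiest to see from output. A sketch (the second case mirrors the `> ~~~c` example in the comment):

```js
import {micromark} from 'micromark'

micromark('> a\nb')
// → '<blockquote>\n<p>a\nb</p>\n</blockquote>': the lazy `b` continues the quote

micromark('> ~~~c\nd')
// Fenced code is concrete, so the lazy `d` cannot continue it: the quote
// (holding an empty code block) closes and `d` becomes its own paragraph.
```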

6
node_modules/micromark/lib/initialize/flow.d.ts generated vendored Normal file
View file

@ -0,0 +1,6 @@
/** @type {InitialConstruct} */
export const flow: InitialConstruct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext

68
node_modules/micromark/lib/initialize/flow.js generated vendored Normal file
View file

@ -0,0 +1,68 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
import {blankLine, content} from 'micromark-core-commonmark'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
/** @type {InitialConstruct} */
export const flow = {
tokenize: initializeFlow
}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeFlow(effects) {
const self = this
const initial = effects.attempt(
// Try to parse a blank line.
blankLine,
atBlankEnding,
// Try to parse initial flow (essentially, only code).
effects.attempt(
this.parser.constructs.flowInitial,
afterConstruct,
factorySpace(
effects,
effects.attempt(
this.parser.constructs.flow,
afterConstruct,
effects.attempt(content, afterConstruct)
),
'linePrefix'
)
)
)
return initial
/** @type {State} */
function atBlankEnding(code) {
if (code === null) {
effects.consume(code)
return
}
effects.enter('lineEndingBlank')
effects.consume(code)
effects.exit('lineEndingBlank')
self.currentConstruct = undefined
return initial
}
/** @type {State} */
function afterConstruct(code) {
if (code === null) {
effects.consume(code)
return
}
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
self.currentConstruct = undefined
return initial
}
}
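
The attempt order above (blank line, then `flowInitial`, which is essentially indented code, then other flow, then content) shows up directly in output; a sketch:

```js
import {micromark} from 'micromark'

micromark('a\n\nb') // blank line ends the paragraph: '<p>a</p>\n<p>b</p>'
micromark('    a')  // four spaces hit `flowInitial`: '<pre><code>a\n</code></pre>'
```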

11
node_modules/micromark/lib/initialize/text.d.ts generated vendored Normal file
View file

@ -0,0 +1,11 @@
export namespace resolver {
const resolveAll: import('micromark-util-types').Resolver
}
export const string: import('micromark-util-types').InitialConstruct
export const text: import('micromark-util-types').InitialConstruct
export type Code = import('micromark-util-types').Code
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext

210
node_modules/micromark/lib/initialize/text.js generated vendored Normal file
View file

@ -0,0 +1,210 @@
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
*/
export const resolver = {
resolveAll: createResolver()
}
export const string = initializeFactory('string')
export const text = initializeFactory('text')
/**
* @param {'string' | 'text'} field
* @returns {InitialConstruct}
*/
function initializeFactory(field) {
return {
tokenize: initializeText,
resolveAll: createResolver(
field === 'text' ? resolveAllLineSuffixes : undefined
)
}
/**
* @this {TokenizeContext}
* @type {Initializer}
*/
function initializeText(effects) {
const self = this
const constructs = this.parser.constructs[field]
const text = effects.attempt(constructs, start, notText)
return start
/** @type {State} */
function start(code) {
return atBreak(code) ? text(code) : notText(code)
}
/** @type {State} */
function notText(code) {
if (code === null) {
effects.consume(code)
return
}
effects.enter('data')
effects.consume(code)
return data
}
/** @type {State} */
function data(code) {
if (atBreak(code)) {
effects.exit('data')
return text(code)
}
// Data.
effects.consume(code)
return data
}
/**
* @param {Code} code
* @returns {boolean}
*/
function atBreak(code) {
if (code === null) {
return true
}
const list = constructs[code]
let index = -1
if (list) {
// Always populated by defaults.
while (++index < list.length) {
const item = list[index]
if (!item.previous || item.previous.call(self, self.previous)) {
return true
}
}
}
return false
}
}
}
/**
* @param {Resolver | undefined} [extraResolver]
* @returns {Resolver}
*/
function createResolver(extraResolver) {
return resolveAllText
/** @type {Resolver} */
function resolveAllText(events, context) {
let index = -1
/** @type {number | undefined} */
let enter
// A rather boring computation (to merge adjacent `data` events) which
    // improves micromark performance by 29%.
while (++index <= events.length) {
if (enter === undefined) {
if (events[index] && events[index][1].type === 'data') {
enter = index
index++
}
} else if (!events[index] || events[index][1].type !== 'data') {
        // Don't do anything if there is one data token.
if (index !== enter + 2) {
events[enter][1].end = events[index - 1][1].end
events.splice(enter + 2, index - enter - 2)
index = enter + 2
}
enter = undefined
}
}
return extraResolver ? extraResolver(events, context) : events
}
}
/**
* A rather ugly set of instructions which again looks at chunks in the input
* stream.
* The reason to do this here is that it is *much* faster to parse in reverse.
 * And that we can't hook into `null` to split the line suffix before an EOF.
* To do: figure out if we can make this into a clean utility, or even in core.
 * As it will be useful for GFM's literal autolink extension (and maybe even
* tables?)
*
* @type {Resolver}
*/
function resolveAllLineSuffixes(events, context) {
let eventIndex = 0 // Skip first.
while (++eventIndex <= events.length) {
if (
(eventIndex === events.length ||
events[eventIndex][1].type === 'lineEnding') &&
events[eventIndex - 1][1].type === 'data'
) {
const data = events[eventIndex - 1][1]
const chunks = context.sliceStream(data)
let index = chunks.length
let bufferIndex = -1
let size = 0
/** @type {boolean | undefined} */
let tabs
while (index--) {
const chunk = chunks[index]
if (typeof chunk === 'string') {
bufferIndex = chunk.length
while (chunk.charCodeAt(bufferIndex - 1) === 32) {
size++
bufferIndex--
}
if (bufferIndex) break
bufferIndex = -1
}
        // Number chunk: a horizontal tab.
        else if (chunk === -2) {
          tabs = true
          size++
        } else if (chunk === -1) {
          // Virtual space (after a tab): ignore.
} else {
// Replacement character, exit.
index++
break
}
}
if (size) {
const token = {
type:
eventIndex === events.length || tabs || size < 2
? 'lineSuffix'
: 'hardBreakTrailing',
start: {
line: data.end.line,
column: data.end.column - size,
offset: data.end.offset - size,
_index: data.start._index + index,
_bufferIndex: index
? bufferIndex
: data.start._bufferIndex + bufferIndex
},
end: Object.assign({}, data.end)
}
data.end = Object.assign({}, token.start)
if (data.start.offset === data.end.offset) {
Object.assign(data, token)
} else {
events.splice(
eventIndex,
0,
['enter', token, context],
['exit', token, context]
)
eventIndex += 2
}
}
eventIndex++
}
}
return events
}
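
`resolveAllLineSuffixes` is what splits trailing whitespace into a discarded `lineSuffix` or a `hardBreakTrailing`; hence the classic two-space hard break. A sketch:

```js
import {micromark} from 'micromark'

micromark('a  \nb') // two trailing spaces → hard break: '<p>a<br />\nb</p>'
micromark('a \nb')  // one trailing space → line suffix, dropped: '<p>a\nb</p>'
```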

11
node_modules/micromark/lib/parse.d.ts generated vendored Normal file
View file

@ -0,0 +1,11 @@
/**
* @param {ParseOptions | null | undefined} [options]
* @returns {ParseContext}
*/
export function parse(options?: ParseOptions | null | undefined): ParseContext
export type Create = import('micromark-util-types').Create
export type FullNormalizedExtension =
import('micromark-util-types').FullNormalizedExtension
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type ParseContext = import('micromark-util-types').ParseContext
export type ParseOptions = import('micromark-util-types').ParseOptions

50
node_modules/micromark/lib/parse.js generated vendored Normal file
View file

@ -0,0 +1,50 @@
/**
* @typedef {import('micromark-util-types').Create} Create
* @typedef {import('micromark-util-types').FullNormalizedExtension} FullNormalizedExtension
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').ParseContext} ParseContext
* @typedef {import('micromark-util-types').ParseOptions} ParseOptions
*/
import {combineExtensions} from 'micromark-util-combine-extensions'
import {content} from './initialize/content.js'
import {document} from './initialize/document.js'
import {flow} from './initialize/flow.js'
import {text, string} from './initialize/text.js'
import {createTokenizer} from './create-tokenizer.js'
import * as defaultConstructs from './constructs.js'
/**
* @param {ParseOptions | null | undefined} [options]
* @returns {ParseContext}
*/
export function parse(options) {
const settings = options || {}
const constructs =
/** @type {FullNormalizedExtension} */
combineExtensions([defaultConstructs, ...(settings.extensions || [])])
/** @type {ParseContext} */
const parser = {
defined: [],
lazy: {},
constructs,
content: create(content),
document: create(document),
flow: create(flow),
string: create(string),
text: create(text)
}
return parser
/**
* @param {InitialConstruct} initial
*/
function create(initial) {
return creator
/** @type {Create} */
function creator(from) {
return createTokenizer(parser, initial, from)
}
}
}
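
The parser object above is normally consumed through `index.js`; a sketch that wires the same pipeline by hand, using the `lib/` entry points that `package.json` exposes:

```js
import {parse} from 'micromark/lib/parse'
import {postprocess} from 'micromark/lib/postprocess'
import {preprocess} from 'micromark/lib/preprocess'

const events = postprocess(
  parse().document().write(preprocess()('# hi', undefined, true))
)
// `events` is a flat list of ['enter' | 'exit', token, context] tuples.
```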

8
node_modules/micromark/lib/postprocess.d.ts generated vendored Normal file
View file

@ -0,0 +1,8 @@
/**
* @param {Array<Event>} events
* @returns {Array<Event>}
*/
export function postprocess(
events: Array<import('micromark-util-types').Event>
): Array<import('micromark-util-types').Event>
export type Event = import('micromark-util-types').Event

16
node_modules/micromark/lib/postprocess.js generated vendored Normal file
View file

@ -0,0 +1,16 @@
/**
* @typedef {import('micromark-util-types').Event} Event
*/
import {subtokenize} from 'micromark-util-subtokenize'
/**
* @param {Array<Event>} events
* @returns {Array<Event>}
*/
export function postprocess(events) {
while (!subtokenize(events)) {
// Empty
}
return events
}

13
node_modules/micromark/lib/preprocess.d.ts generated vendored Normal file
View file

@ -0,0 +1,13 @@
/**
* @returns {Preprocessor}
*/
export function preprocess(): Preprocessor
export type Chunk = import('micromark-util-types').Chunk
export type Code = import('micromark-util-types').Code
export type Encoding = import('micromark-util-types').Encoding
export type Value = import('micromark-util-types').Value
export type Preprocessor = (
value: Value,
encoding?: Encoding | null | undefined,
end?: boolean | null | undefined
) => Array<Chunk>

110
node_modules/micromark/lib/preprocess.js generated vendored Normal file
View file

@ -0,0 +1,110 @@
/**
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Encoding} Encoding
* @typedef {import('micromark-util-types').Value} Value
*/
/**
* @callback Preprocessor
* @param {Value} value
* @param {Encoding | null | undefined} [encoding]
* @param {boolean | null | undefined} [end=false]
* @returns {Array<Chunk>}
*/
const search = /[\0\t\n\r]/g
/**
* @returns {Preprocessor}
*/
export function preprocess() {
let column = 1
let buffer = ''
/** @type {boolean | undefined} */
let start = true
/** @type {boolean | undefined} */
let atCarriageReturn
return preprocessor
/** @type {Preprocessor} */
function preprocessor(value, encoding, end) {
/** @type {Array<Chunk>} */
const chunks = []
/** @type {RegExpMatchArray | null} */
let match
/** @type {number} */
let next
/** @type {number} */
let startPosition
/** @type {number} */
let endPosition
/** @type {Code} */
let code
// @ts-expect-error `Buffer` does allow an encoding.
value = buffer + value.toString(encoding)
startPosition = 0
buffer = ''
if (start) {
// To do: `markdown-rs` actually parses BOMs (byte order mark).
if (value.charCodeAt(0) === 65279) {
startPosition++
}
start = undefined
}
while (startPosition < value.length) {
search.lastIndex = startPosition
match = search.exec(value)
endPosition =
match && match.index !== undefined ? match.index : value.length
code = value.charCodeAt(endPosition)
if (!match) {
buffer = value.slice(startPosition)
break
}
if (code === 10 && startPosition === endPosition && atCarriageReturn) {
chunks.push(-3)
atCarriageReturn = undefined
} else {
if (atCarriageReturn) {
chunks.push(-5)
atCarriageReturn = undefined
}
if (startPosition < endPosition) {
chunks.push(value.slice(startPosition, endPosition))
column += endPosition - startPosition
}
switch (code) {
case 0: {
chunks.push(65533)
column++
break
}
case 9: {
next = Math.ceil(column / 4) * 4
chunks.push(-2)
while (column++ < next) chunks.push(-1)
break
}
case 10: {
chunks.push(-4)
column = 1
break
}
default: {
atCarriageReturn = true
column = 1
}
}
}
startPosition = endPosition + 1
}
if (end) {
if (atCarriageReturn) chunks.push(-5)
if (buffer) chunks.push(buffer)
chunks.push(null)
}
return chunks
}
}
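
A sketch tracing the preprocessor on a small input shows the chunk encoding used throughout the package (-5 CR, -4 LF, -3 CRLF, -2 tab, -1 virtual space, `null` EOF):

```js
import {preprocess} from 'micromark/lib/preprocess'

preprocess()('a\tb\r\nc', undefined, true)
// → ['a', -2, -1, -1, 'b', -3, 'c', null]
// 'a', a tab at column 2 padded to the stop at column 4, 'b', CRLF, 'c', EOF
```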

1
node_modules/micromark/node_modules/.bin/uvu generated vendored Symbolic link
View file

@ -0,0 +1 @@
../../../uvu/bin.js

140
node_modules/micromark/package.json generated vendored Normal file
View file

@ -0,0 +1,140 @@
{
"name": "micromark",
"version": "3.2.0",
"description": "small commonmark compliant markdown parser with positional info and concrete tokens",
"license": "MIT",
"keywords": [
"commonmark",
"compiler",
"gfm",
"html",
"lexer",
"markdown",
"markup",
"md",
"unified",
"parse",
"parser",
"plugin",
"process",
"remark",
"render",
"renderer",
"token",
"tokenizer"
],
"repository": "https://github.com/micromark/micromark/tree/main/packages/micromark",
"bugs": "https://github.com/micromark/micromark/issues",
"funding": [
{
"type": "GitHub Sponsors",
"url": "https://github.com/sponsors/unifiedjs"
},
{
"type": "OpenCollective",
"url": "https://opencollective.com/unified"
}
],
"author": "Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)",
"contributors": [
"Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)"
],
"sideEffects": false,
"type": "module",
"main": "index.js",
"types": "dev/index.d.ts",
"files": [
"dev/",
"lib/",
"index.d.ts",
"index.js",
"stream.d.ts",
"stream.js"
],
"exports": {
".": {
"types": "./dev/index.d.ts",
"development": "./dev/index.js",
"default": "./index.js"
},
"./stream": {
"types": "./dev/stream.d.ts",
"development": "./dev/stream.js",
"default": "./stream.js"
},
"./stream.js": {
"types": "./dev/stream.d.ts",
"development": "./dev/stream.js",
"default": "./stream.js"
},
"./lib/compile": {
"types": "./dev/lib/compile.d.ts",
"development": "./dev/lib/compile.js",
"default": "./lib/compile.js"
},
"./lib/compile.js": {
"types": "./dev/lib/compile.d.ts",
"development": "./dev/lib/compile.js",
"default": "./lib/compile.js"
},
"./lib/parse": {
"types": "./dev/lib/parse.d.ts",
"development": "./dev/lib/parse.js",
"default": "./lib/parse.js"
},
"./lib/parse.js": {
"types": "./dev/lib/parse.d.ts",
"development": "./dev/lib/parse.js",
"default": "./lib/parse.js"
},
"./lib/postprocess": {
"types": "./dev/lib/postprocess.d.ts",
"development": "./dev/lib/postprocess.js",
"default": "./lib/postprocess.js"
},
"./lib/postprocess.js": {
"types": "./dev/lib/postprocess.d.ts",
"development": "./dev/lib/postprocess.js",
"default": "./lib/postprocess.js"
},
"./lib/preprocess": {
"types": "./dev/lib/preprocess.d.ts",
"development": "./dev/lib/preprocess.js",
"default": "./lib/preprocess.js"
},
"./lib/preprocess.js": {
"types": "./dev/lib/preprocess.d.ts",
"development": "./dev/lib/preprocess.js",
"default": "./lib/preprocess.js"
}
},
"dependencies": {
"@types/debug": "^4.0.0",
"debug": "^4.0.0",
"micromark-core-commonmark": "^1.0.1",
"micromark-factory-space": "^1.0.0",
"micromark-util-character": "^1.0.0",
"micromark-util-chunked": "^1.0.0",
"micromark-util-combine-extensions": "^1.0.0",
"micromark-util-decode-numeric-character-reference": "^1.0.0",
"micromark-util-encode": "^1.0.0",
"micromark-util-normalize-identifier": "^1.0.0",
"micromark-util-resolve-all": "^1.0.0",
"micromark-util-sanitize-uri": "^1.0.0",
"micromark-util-subtokenize": "^1.0.0",
"micromark-util-symbol": "^1.0.0",
"micromark-util-types": "^1.0.1",
"decode-named-character-reference": "^1.0.0",
"uvu": "^0.5.0"
},
"scripts": {
"build": "micromark-build"
},
"xo": false,
"typeCoverage": {
"atLeast": 100,
"detail": true,
"strict": true,
"ignoreCatch": true
}
}

485
node_modules/micromark/readme.md generated vendored Normal file

@@ -0,0 +1,485 @@
# micromark
[![Build][build-badge]][build]
[![Coverage][coverage-badge]][coverage]
[![Downloads][downloads-badge]][downloads]
[![Size][bundle-size-badge]][bundle-size]
[![Sponsors][sponsors-badge]][opencollective]
[![Backers][backers-badge]][opencollective]
[![Chat][chat-badge]][chat]
Markdown parser.
> **Note**: this is the `micromark` package from the micromark monorepo.
> See the [monorepo readme][micromark] for more on the project.
> See this readme for how to use it.
## Feature highlights
<!-- Note: this section has to be in sync with the monorepo readme. -->
* [x] **[compliant][commonmark]** (100% to CommonMark)
* [x] **[extensions][]** (100% [GFM][], 100% [MDX.js][mdxjs], [directives][],
[frontmatter][], [math][])
* [x] **[safe][security]** (by default)
* [x] **[robust][test]** (±2k tests, 100% coverage, fuzz testing)
* [x] **[small][size-debug]** (smallest CommonMark parser at ±15kb)
## Contents
* [When should I use this?](#when-should-i-use-this)
* [What is this?](#what-is-this)
* [Install](#install)
* [Use](#use)
* [API](#api)
* [`micromark(value[, encoding][, options])`](#micromarkvalue-encoding-options)
* [`stream(options?)`](#streamoptions)
* [`Options`](#options)
* [Types](#types)
* [Compatibility](#compatibility)
* [Security](#security)
* [Contribute](#contribute)
* [Sponsor](#sponsor)
* [License](#license)
## When should I use this?
<!-- Note: this section has to be in sync with the monorepo readme. -->
* If you *just* want to turn markdown into HTML (with maybe a few extensions)
* If you want to do *really complex things* with markdown
See [§ Comparison][comparison] for more info.
## What is this?
<!-- Note: this section has to be in sync with the monorepo readme. -->
`micromark` is an open source markdown parser written in JavaScript.
It's implemented as a state machine that emits concrete tokens, so that every
byte is accounted for, with positional info.
It then compiles those tokens directly to HTML, but other tools can take the
data and, for example, build an AST, which is easier to work with
([`mdast-util-to-markdown`][mdast-util-to-markdown]).
While most markdown parsers work towards compliance with CommonMark (or GFM),
this project goes further by following how the reference parsers (`cmark`,
`cmark-gfm`) work, which is confirmed with thousands of extra tests.
Other than CommonMark and GFM, micromark also supports common extensions to
markdown such as MDX, math, and frontmatter.
These npm packages have a sibling project in Rust:
[`markdown-rs`][markdown-rs].
* to learn markdown, see this [cheatsheet and tutorial][cheat]
* for more about us, see [`unifiedjs.com`][site]
* for updates, see [Twitter][]
* for questions, see [Discussions][chat]
* to help, see [contribute][] and [sponsor][] below
## Install
<!-- Note: this section has to be in sync with the monorepo readme. -->
This package is [ESM only][esm].
In Node.js (version 16+), install with [npm][]:
```sh
npm install micromark
```
In Deno with [`esm.sh`][esmsh]:
```js
import {micromark} from 'https://esm.sh/micromark@3'
```
In browsers with [`esm.sh`][esmsh]:
```html
<script type="module">
import {micromark} from 'https://esm.sh/micromark@3?bundle'
</script>
```
## Use
<!-- Note: this section has to be in sync with the monorepo readme. -->
Typical use (buffering):
```js
import {micromark} from 'micromark'
console.log(micromark('## Hello, *world*!'))
```
Yields:
```html
<h2>Hello, <em>world</em>!</h2>
```
You can pass extensions (in this case [`micromark-extension-gfm`][gfm]):
```js
import {micromark} from 'micromark'
import {gfm, gfmHtml} from 'micromark-extension-gfm'
const value = '* [x] contact@example.com ~~strikethrough~~'
const result = micromark(value, {
extensions: [gfm()],
htmlExtensions: [gfmHtml()]
})
console.log(result)
```
Yields:
```html
<ul>
<li><input checked="" disabled="" type="checkbox"> <a href="mailto:contact@example.com">contact@example.com</a> <del>strikethrough</del></li>
</ul>
```
Streaming interface:
```js
import {createReadStream} from 'node:fs'
import {stream} from 'micromark/stream'
createReadStream('example.md')
.on('error', handleError)
.pipe(stream())
.pipe(process.stdout)
function handleError(error) {
// Handle your error here!
throw error
}
```
## API
`micromark` core has two entries in its export map: `micromark` and
`micromark/stream`.
`micromark` exports the identifier [`micromark`][api-micromark].
`micromark/stream` exports the identifier [`stream`][api-stream].
There are no default exports.
The export map supports the [`development` condition][development].
Run `node --conditions development module.js` to get instrumented dev code.
Without this condition, production code is loaded.
See [§ Size & debug][size-debug] for more info.
### `micromark(value[, encoding][, options])`
Compile markdown to HTML.
###### Parameters
* `value` (`string` or [`Buffer`][buffer])
— markdown to parse
* `encoding` (`string`, default: `'utf8'`)
— [character encoding][encoding] to understand `value` as when it's a
`Buffer`
* `options` ([`Options`][api-options], optional)
— configuration
###### Returns
Compiled HTML (`string`).
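For example (a minimal sketch; `example.md` is a hypothetical file), compiling a `Buffer` read from disk:
```js
import {readFileSync} from 'node:fs'
import {micromark} from 'micromark'
// Without an encoding, `readFileSync` returns a `Buffer`; the second
// argument tells `micromark` how to decode it.
const buf = readFileSync('example.md')
console.log(micromark(buf, 'utf8'))
```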
### `stream(options?)`
Create a duplex (readable and writable) stream.
Some of the work to parse markdown can be done streaming, but in the
end buffering is required.
micromark does not handle errors for you, so you must handle errors on whatever
streams you pipe into it.
As markdown has no concept of syntax errors, `micromark` itself does not emit errors.
###### Parameters
* `options` ([`Options`][api-options], optional)
— configuration
###### Returns
Duplex stream.
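A minimal sketch of that buffering behavior: the compiled HTML is emitted once, when the stream is ended.
```js
import {stream} from 'micromark/stream'
const markdown = stream()
markdown.pipe(process.stdout)
markdown.write('*hi')
markdown.end('*')
// stdout receives '<p><em>hi</em></p>' only after `end`.
```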
### `Options`
Configuration (TypeScript type).
##### Fields
###### `allowDangerousHtml`
Whether to allow (dangerous) HTML (`boolean`, default: `false`).
The default is `false`, which still parses the HTML according to CommonMark
but shows the HTML as text instead of as elements.
Pass `true` for trusted content to get actual HTML elements.
See [§ Security][security].
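For example (a minimal sketch):
```js
import {micromark} from 'micromark'
console.log(micromark('<em>a</em>'))
// => '<p>&lt;em&gt;a&lt;/em&gt;</p>'
console.log(micromark('<em>a</em>', {allowDangerousHtml: true}))
// => '<p><em>a</em></p>'
```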
###### `allowDangerousProtocol`
Whether to allow dangerous protocols in links and images (`boolean`, default:
`false`).
The default is `false`, which drops URLs in links and images that use dangerous
protocols.
Pass `true` for trusted content to support all protocols.
URLs that have no protocol (which means it's relative to the current page, such
as `./some/page.html`) and URLs that have a safe protocol (for images: `http`,
`https`; for links: `http`, `https`, `irc`, `ircs`, `mailto`, `xmpp`), are
safe.
All other URLs are dangerous and dropped.
See [§ Security][security].
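For example (a minimal sketch), a `javascript:` URL is dropped by default:
```js
import {micromark} from 'micromark'
console.log(micromark('[text](javascript:alert(1))'))
// => '<p><a href="">text</a></p>'
console.log(micromark('[text](javascript:alert(1))', {allowDangerousProtocol: true}))
// => '<p><a href="javascript:alert(1)">text</a></p>'
```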
###### `defaultLineEnding`
Default line ending to use when compiling to HTML, for line endings not in
`value` (`'\r'`, `'\n'`, or `'\r\n'`; default: first line ending or `'\n'`).
Generally, `micromark` copies line endings (`\r`, `\n`, `\r\n`) in the markdown
document over to the compiled HTML.
In some cases, such as `> a`, CommonMark requires that extra line endings are
added: `<blockquote>\n<p>a</p>\n</blockquote>`.
To create that line ending, the document is checked for the first line ending
that is used.
If there is no line ending, `defaultLineEnding` is used.
If that isn't configured, `\n` is used.
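For example (a minimal sketch), a document without line endings:
```js
import {micromark} from 'micromark'
// `> a` contains no line endings, so the ones CommonMark requires
// in the output come from `defaultLineEnding`.
console.log(micromark('> a', {defaultLineEnding: '\r\n'}))
// => '<blockquote>\r\n<p>a</p>\r\n</blockquote>'
```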
###### `extensions`
Array of syntax extensions (`Array<SyntaxExtension>`, default: `[]`).
See [§ Extensions][extensions].
###### `htmlExtensions`
Array of HTML extensions (`Array<HtmlExtension>`, default: `[]`).
See [§ Extensions][extensions].
## Types
This package is fully typed with [TypeScript][].
It exports the additional type [`Options`][api-options].
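For example (a minimal sketch), using the type through JSDoc in plain JavaScript:
```js
import {micromark} from 'micromark'
/** @type {import('micromark').Options} */
const options = {allowDangerousHtml: true}
console.log(micromark('<b>hi</b>', options))
// => '<p><b>hi</b></p>'
```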
## Compatibility
Projects maintained by the unified collective are compatible with all maintained
versions of Node.js.
As of now, that is Node.js 16+.
Our projects sometimes work with older versions, but this is not guaranteed.
## Security
This package is safe.
See [`security.md`][securitymd] in [`micromark/.github`][health] for how to
submit a security report.
## Contribute
See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways
to get started.
See [`support.md`][support] for ways to get help.
This project has a [code of conduct][coc].
By interacting with this repository, organisation, or community you agree to
abide by its terms.
## Sponsor
<!-- Note: this section has to be in sync with the monorepo readme. -->
Support this effort and give back by sponsoring on [OpenCollective][]!
<table>
<tr valign="middle">
<td width="100%" align="center" colspan="10">
<br>
<a href="https://www.salesforce.com">Salesforce</a> 🏅<br><br>
<a href="https://www.salesforce.com"><img src="https://images.opencollective.com/salesforce/ca8f997/logo/512.png" width="256"></a>
</td>
</tr>
<tr valign="middle">
<td width="20%" align="center" rowspan="2" colspan="2">
<a href="https://vercel.com">Vercel</a><br><br>
<a href="https://vercel.com"><img src="https://avatars1.githubusercontent.com/u/14985020?s=256&v=4" width="128"></a>
</td>
<td width="20%" align="center" rowspan="2" colspan="2">
<a href="https://motif.land">Motif</a><br><br>
<a href="https://motif.land"><img src="https://avatars1.githubusercontent.com/u/74457950?s=256&v=4" width="128"></a>
</td>
<td width="20%" align="center" rowspan="2" colspan="2">
<a href="https://www.hashicorp.com">HashiCorp</a><br><br>
<a href="https://www.hashicorp.com"><img src="https://avatars1.githubusercontent.com/u/761456?s=256&v=4" width="128"></a>
</td>
<td width="20%" align="center" rowspan="2" colspan="2">
<a href="https://www.gitbook.com">GitBook</a><br><br>
<a href="https://www.gitbook.com"><img src="https://avatars1.githubusercontent.com/u/7111340?s=256&v=4" width="128"></a>
</td>
<td width="20%" align="center" rowspan="2" colspan="2">
<a href="https://www.gatsbyjs.org">Gatsby</a><br><br>
<a href="https://www.gatsbyjs.org"><img src="https://avatars1.githubusercontent.com/u/12551863?s=256&v=4" width="128"></a>
</td>
</tr>
<tr valign="middle">
</tr>
<tr valign="middle">
<td width="20%" align="center" rowspan="2" colspan="2">
<a href="https://www.netlify.com">Netlify</a><br><br>
<!--OC has a sharper image-->
<a href="https://www.netlify.com"><img src="https://images.opencollective.com/netlify/4087de2/logo/256.png" width="128"></a>
</td>
<td width="10%" align="center">
<a href="https://www.coinbase.com">Coinbase</a><br><br>
<a href="https://www.coinbase.com"><img src="https://avatars1.githubusercontent.com/u/1885080?s=256&v=4" width="64"></a>
</td>
<td width="10%" align="center">
<a href="https://themeisle.com">ThemeIsle</a><br><br>
<a href="https://themeisle.com"><img src="https://avatars1.githubusercontent.com/u/58979018?s=128&v=4" width="64"></a>
</td>
<td width="10%" align="center">
<a href="https://expo.io">Expo</a><br><br>
<a href="https://expo.io"><img src="https://avatars1.githubusercontent.com/u/12504344?s=128&v=4" width="64"></a>
</td>
<td width="10%" align="center">
<a href="https://boostnote.io">Boost Note</a><br><br>
<a href="https://boostnote.io"><img src="https://images.opencollective.com/boosthub/6318083/logo/128.png" width="64"></a>
</td>
<td width="10%" align="center">
<a href="https://markdown.space">Markdown Space</a><br><br>
<a href="https://markdown.space"><img src="https://images.opencollective.com/markdown-space/e1038ed/logo/128.png" width="64"></a>
</td>
<td width="10%" align="center">
<a href="https://www.holloway.com">Holloway</a><br><br>
<a href="https://www.holloway.com"><img src="https://avatars1.githubusercontent.com/u/35904294?s=128&v=4" width="64"></a>
</td>
<td width="10%"></td>
<td width="10%"></td>
</tr>
<tr valign="middle">
<td width="100%" align="center" colspan="8">
<br>
<a href="https://opencollective.com/unified"><strong>You?</strong></a>
<br><br>
</td>
</tr>
</table>
## License
[MIT][license] © [Titus Wormer][author]
<!-- Definitions -->
[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg
[build]: https://github.com/micromark/micromark/actions
[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg
[coverage]: https://codecov.io/github/micromark/micromark
[downloads-badge]: https://img.shields.io/npm/dm/micromark.svg
[downloads]: https://www.npmjs.com/package/micromark
[bundle-size-badge]: https://img.shields.io/badge/dynamic/json?label=minzipped%20size&query=$.size.compressedSize&url=https://deno.bundlejs.com/?q=micromark
[bundle-size]: https://bundlejs.com/?q=micromark
[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg
[backers-badge]: https://opencollective.com/unified/backers/badge.svg
[opencollective]: https://opencollective.com/unified
[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg
[chat]: https://github.com/micromark/micromark/discussions
[npm]: https://docs.npmjs.com/cli/install
[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c
[esmsh]: https://esm.sh
[typescript]: https://www.typescriptlang.org
[development]: https://nodejs.org/api/packages.html#packages_resolving_user_conditions
[license]: https://github.com/micromark/micromark/blob/main/license
[author]: https://wooorm.com
[health]: https://github.com/micromark/.github
[securitymd]: https://github.com/micromark/.github/blob/main/security.md
[contributing]: https://github.com/micromark/.github/blob/main/contributing.md
[support]: https://github.com/micromark/.github/blob/main/support.md
[coc]: https://github.com/micromark/.github/blob/main/code-of-conduct.md
[cheat]: https://commonmark.org/help/
[twitter]: https://twitter.com/unifiedjs
[site]: https://unifiedjs.com
[contribute]: #contribute
[encoding]: https://nodejs.org/api/buffer.html#buffer_buffers_and_character_encodings
[buffer]: https://nodejs.org/api/buffer.html
[commonmark]: https://commonmark.org
[directives]: https://github.com/micromark/micromark-extension-directive
[frontmatter]: https://github.com/micromark/micromark-extension-frontmatter
[gfm]: https://github.com/micromark/micromark-extension-gfm
[math]: https://github.com/micromark/micromark-extension-math
[mdxjs]: https://github.com/micromark/micromark-extension-mdxjs
[security]: #security
[sponsor]: #sponsor
[micromark]: https://github.com/micromark/micromark
[extensions]: https://github.com/micromark/micromark#extensions
[test]: https://github.com/micromark/micromark#test
[size-debug]: https://github.com/micromark/micromark#size--debug
[comparison]: https://github.com/micromark/micromark#comparison
[markdown-rs]: https://github.com/wooorm/markdown-rs
[mdast-util-to-markdown]: https://github.com/syntax-tree/mdast-util-to-markdown
[api-micromark]: #micromarkvalue-encoding-options
[api-stream]: #streamoptions
[api-options]: #options

34
node_modules/micromark/stream.d.ts generated vendored Normal file

@@ -0,0 +1,34 @@
/**
* Create a duplex (readable and writable) stream.
*
* Some of the work to parse markdown can be done streaming, but in the
* end buffering is required.
*
* micromark does not handle errors for you, so you must handle errors on whatever
* streams you pipe into it.
* As markdown has no concept of syntax errors, `micromark` itself does not emit errors.
*
* @param {Options | null | undefined} [options]
* Configuration (optional).
* @returns {MinimalDuplex}
* Duplex stream.
*/
export function stream(options?: Options | null | undefined): MinimalDuplex
export type Options = import('micromark-util-types').Options
export type Value = import('micromark-util-types').Value
export type Encoding = import('micromark-util-types').Encoding
/**
* Function called when write was successful.
*/
export type Callback = () => void
export type MinimalDuplex = Omit<
NodeJS.ReadableStream & NodeJS.WritableStream,
| 'isPaused'
| 'pause'
| 'read'
| 'resume'
| 'setEncoding'
| 'unpipe'
| 'unshift'
| 'wrap'
>

239
node_modules/micromark/stream.js generated vendored Normal file

@@ -0,0 +1,239 @@
/**
* @typedef {import('micromark-util-types').Options} Options
* @typedef {import('micromark-util-types').Value} Value
* @typedef {import('micromark-util-types').Encoding} Encoding
*/
/**
* @callback Callback
* Function called when write was successful.
* @returns {void}
* Nothing.
*
* @typedef {Omit<NodeJS.ReadableStream & NodeJS.WritableStream, 'isPaused' | 'pause' | 'read' | 'resume' | 'setEncoding' | 'unpipe' | 'unshift' | 'wrap'>} MinimalDuplex
*/
import {EventEmitter} from 'events'
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
/**
* Create a duplex (readable and writable) stream.
*
* Some of the work to parse markdown can be done streaming, but in the
* end buffering is required.
*
* micromark does not handle errors for you, so you must handle errors on whatever
* streams you pipe into it.
* As markdown has no concept of syntax errors, `micromark` itself does not emit errors.
*
* @param {Options | null | undefined} [options]
* Configuration (optional).
* @returns {MinimalDuplex}
* Duplex stream.
*/
export function stream(options) {
const prep = preprocess()
const tokenize = parse(options).document().write
const comp = compile(options)
/** @type {boolean} */
let ended
/** @type {MinimalDuplex} */
// @ts-expect-error `addListener` is fine.
const emitter = Object.assign(new EventEmitter(), {
end,
pipe,
readable: true,
writable: true,
write
})
return emitter
/**
* Write a chunk into memory.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*/
function write(chunk, encoding, callback) {
if (typeof encoding === 'function') {
callback = encoding
encoding = undefined
}
if (ended) {
throw new Error('Did not expect `write` after `end`')
}
tokenize(prep(chunk || '', encoding))
if (callback) {
callback()
}
// Signal successful write.
return true
}
/**
* End the writing.
*
* Passes all arguments as a final `write`.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @param {Callback | Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Buffer`).
* @param {Callback | Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Buffer`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*/
function end(chunk, encoding, callback) {
if (typeof chunk === 'function') {
encoding = chunk
chunk = undefined
}
if (typeof encoding === 'function') {
callback = encoding
encoding = undefined
}
write(chunk, encoding, callback)
emitter.emit('data', comp(postprocess(tokenize(prep('', encoding, true)))))
emitter.emit('end')
ended = true
return true
}
/**
* Pipe the processor into a writable stream.
*
* Basically `Stream#pipe`, but inlined and simplified to keep the bundled
* size down.
* See: <https://github.com/nodejs/node/blob/43a5170/lib/internal/streams/legacy.js#L13>.
*
* @template {NodeJS.WritableStream} Stream
* @param {Stream} dest
* @param {{end?: boolean | null | undefined}} [options]
* @returns {Stream}
*/
function pipe(dest, options) {
emitter.on('data', ondata)
emitter.on('error', onerror)
emitter.on('end', cleanup)
emitter.on('close', cleanup)
// If the `end` option is not supplied, `dest.end()` will be
// called when the `end` or `close` events are received.
// @ts-expect-error `_isStdio` is available on `std{err,out}`
if (!dest._isStdio && (!options || options.end !== false)) {
emitter.on('end', onend)
}
dest.on('error', onerror)
dest.on('close', cleanup)
dest.emit('pipe', emitter)
return dest
/**
* End destination stream.
*
* @returns {void}
*/
function onend() {
if (dest.end) {
dest.end()
}
}
/**
* Handle data.
*
* @param {string} chunk
* @returns {void}
*/
function ondata(chunk) {
if (dest.writable) {
dest.write(chunk)
}
}
/**
* Clean listeners.
*
* @returns {void}
*/
function cleanup() {
emitter.removeListener('data', ondata)
emitter.removeListener('end', onend)
emitter.removeListener('error', onerror)
emitter.removeListener('end', cleanup)
emitter.removeListener('close', cleanup)
dest.removeListener('error', onerror)
dest.removeListener('close', cleanup)
}
/**
* Close dangling pipes and handle unheard errors.
*
* @param {Error | null | undefined} [error]
* @returns {void}
*/
function onerror(error) {
cleanup()
if (!emitter.listenerCount('error')) {
throw error // Unhandled stream error in pipe.
}
}
}
}