🎉 initiate project *astro_rewrite*
parent ffd4d5e86c
commit 2ba37bfbe3
8658 changed files with 2268794 additions and 2538 deletions
node_modules/micromark-util-subtokenize/dev/index.d.ts (generated, vendored, normal file, 14 lines added)
@@ -0,0 +1,14 @@
/**
 * Tokenize subcontent.
 *
 * @param {Array<Event>} events
 *   List of events.
 * @returns {boolean}
 *   Whether subtokens were found.
 */
export function subtokenize(
  events: Array<import('micromark-util-types').Event>
): boolean
export type Chunk = import('micromark-util-types').Chunk
export type Event = import('micromark-util-types').Event
export type Token = import('micromark-util-types').Token
node_modules/micromark-util-subtokenize/dev/index.js (generated, vendored, normal file, 261 lines added)
@@ -0,0 +1,261 @@
/**
 * @typedef {import('micromark-util-types').Chunk} Chunk
 * @typedef {import('micromark-util-types').Event} Event
 * @typedef {import('micromark-util-types').Token} Token
 */

import {splice} from 'micromark-util-chunked'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

/**
 * Tokenize subcontent.
 *
 * @param {Array<Event>} events
 *   List of events.
 * @returns {boolean}
 *   Whether subtokens were found.
 */
export function subtokenize(events) {
  /** @type {Record<string, number>} */
  const jumps = {}
  let index = -1
  /** @type {Event} */
  let event
  /** @type {number | undefined} */
  let lineIndex
  /** @type {number} */
  let otherIndex
  /** @type {Event} */
  let otherEvent
  /** @type {Array<Event>} */
  let parameters
  /** @type {Array<Event>} */
  let subevents
  /** @type {boolean | undefined} */
  let more

  while (++index < events.length) {
    while (index in jumps) {
      index = jumps[index]
    }

    event = events[index]

    // Add a hook for the GFM tasklist extension, which needs to know if text
    // is in the first content of a list item.
    if (
      index &&
      event[1].type === types.chunkFlow &&
      events[index - 1][1].type === types.listItemPrefix
    ) {
      assert(event[1]._tokenizer, 'expected `_tokenizer` on subtokens')
      subevents = event[1]._tokenizer.events
      otherIndex = 0

      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === types.lineEndingBlank
      ) {
        otherIndex += 2
      }

      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === types.content
      ) {
        while (++otherIndex < subevents.length) {
          if (subevents[otherIndex][1].type === types.content) {
            break
          }

          if (subevents[otherIndex][1].type === types.chunkText) {
            subevents[otherIndex][1]._isInFirstContentOfListItem = true
            otherIndex++
          }
        }
      }
    }

    // Enter.
    if (event[0] === 'enter') {
      if (event[1].contentType) {
        Object.assign(jumps, subcontent(events, index))
        index = jumps[index]
        more = true
      }
    }
    // Exit.
    else if (event[1]._container) {
      otherIndex = index
      lineIndex = undefined

      while (otherIndex--) {
        otherEvent = events[otherIndex]

        if (
          otherEvent[1].type === types.lineEnding ||
          otherEvent[1].type === types.lineEndingBlank
        ) {
          if (otherEvent[0] === 'enter') {
            if (lineIndex) {
              events[lineIndex][1].type = types.lineEndingBlank
            }

            otherEvent[1].type = types.lineEnding
            lineIndex = otherIndex
          }
        } else {
          break
        }
      }

      if (lineIndex) {
        // Fix position.
        event[1].end = Object.assign({}, events[lineIndex][1].start)

        // Switch container exit w/ line endings.
        parameters = events.slice(lineIndex, index)
        parameters.unshift(event)
        splice(events, lineIndex, index - lineIndex + 1, parameters)
      }
    }
  }

  return !more
}

/**
 * Tokenize embedded tokens.
 *
 * @param {Array<Event>} events
 * @param {number} eventIndex
 * @returns {Record<string, number>}
 */
function subcontent(events, eventIndex) {
  const token = events[eventIndex][1]
  const context = events[eventIndex][2]
  let startPosition = eventIndex - 1
  /** @type {Array<number>} */
  const startPositions = []
  assert(token.contentType, 'expected `contentType` on subtokens')
  const tokenizer =
    token._tokenizer || context.parser[token.contentType](token.start)
  const childEvents = tokenizer.events
  /** @type {Array<[number, number]>} */
  const jumps = []
  /** @type {Record<string, number>} */
  const gaps = {}
  /** @type {Array<Chunk>} */
  let stream
  /** @type {Token | undefined} */
  let previous
  let index = -1
  /** @type {Token | undefined} */
  let current = token
  let adjust = 0
  let start = 0
  const breaks = [start]

  // Loop forward through the linked tokens to pass them in order to the
  // subtokenizer.
  while (current) {
    // Find the position of the event for this token.
    while (events[++startPosition][1] !== current) {
      // Empty.
    }

    assert(
      !previous || current.previous === previous,
      'expected previous to match'
    )
    assert(!previous || previous.next === current, 'expected next to match')

    startPositions.push(startPosition)

    if (!current._tokenizer) {
      stream = context.sliceStream(current)

      if (!current.next) {
        stream.push(codes.eof)
      }

      if (previous) {
        tokenizer.defineSkip(current.start)
      }

      if (current._isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = true
      }

      tokenizer.write(stream)

      if (current._isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = undefined
      }
    }

    // Unravel the next token.
    previous = current
    current = current.next
  }

  // Now, loop back through all events (and linked tokens), to figure out which
  // parts belong where.
  current = token

  while (++index < childEvents.length) {
    if (
      // Find a void token that includes a break.
      childEvents[index][0] === 'exit' &&
      childEvents[index - 1][0] === 'enter' &&
      childEvents[index][1].type === childEvents[index - 1][1].type &&
      childEvents[index][1].start.line !== childEvents[index][1].end.line
    ) {
      assert(current, 'expected a current token')
      start = index + 1
      breaks.push(start)
      // Help GC.
      current._tokenizer = undefined
      current.previous = undefined
      current = current.next
    }
  }

  // Help GC.
  tokenizer.events = []

  // If there’s one more token (which is the cases for lines that end in an
  // EOF), that’s perfect: the last point we found starts it.
  // If there isn’t then make sure any remaining content is added to it.
  if (current) {
    // Help GC.
    current._tokenizer = undefined
    current.previous = undefined
    assert(!current.next, 'expected no next token')
  } else {
    breaks.pop()
  }

  // Now splice the events from the subtokenizer into the current events,
  // moving back to front so that splice indices aren’t affected.
  index = breaks.length

  while (index--) {
    const slice = childEvents.slice(breaks[index], breaks[index + 1])
    const start = startPositions.pop()
    assert(start !== undefined, 'expected a start position when splicing')
    jumps.unshift([start, start + slice.length - 1])
    splice(events, start, 2, slice)
  }

  index = -1

  while (++index < jumps.length) {
    gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
    adjust += jumps[index][1] - jumps[index][0] - 1
  }

  return gaps
}
node_modules/micromark-util-subtokenize/index.d.ts (generated, vendored, normal file, 14 lines added)
@@ -0,0 +1,14 @@
/**
 * Tokenize subcontent.
 *
 * @param {Array<Event>} events
 *   List of events.
 * @returns {boolean}
 *   Whether subtokens were found.
 */
export function subtokenize(
  events: Array<import('micromark-util-types').Event>
): boolean
export type Chunk = import('micromark-util-types').Chunk
export type Event = import('micromark-util-types').Event
export type Token = import('micromark-util-types').Token
node_modules/micromark-util-subtokenize/index.js (generated, vendored, normal file, 224 lines added)
@@ -0,0 +1,224 @@
/**
 * @typedef {import('micromark-util-types').Chunk} Chunk
 * @typedef {import('micromark-util-types').Event} Event
 * @typedef {import('micromark-util-types').Token} Token
 */

import {splice} from 'micromark-util-chunked'
/**
 * Tokenize subcontent.
 *
 * @param {Array<Event>} events
 *   List of events.
 * @returns {boolean}
 *   Whether subtokens were found.
 */
export function subtokenize(events) {
  /** @type {Record<string, number>} */
  const jumps = {}
  let index = -1
  /** @type {Event} */
  let event
  /** @type {number | undefined} */
  let lineIndex
  /** @type {number} */
  let otherIndex
  /** @type {Event} */
  let otherEvent
  /** @type {Array<Event>} */
  let parameters
  /** @type {Array<Event>} */
  let subevents
  /** @type {boolean | undefined} */
  let more
  while (++index < events.length) {
    while (index in jumps) {
      index = jumps[index]
    }
    event = events[index]

    // Add a hook for the GFM tasklist extension, which needs to know if text
    // is in the first content of a list item.
    if (
      index &&
      event[1].type === 'chunkFlow' &&
      events[index - 1][1].type === 'listItemPrefix'
    ) {
      subevents = event[1]._tokenizer.events
      otherIndex = 0
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'lineEndingBlank'
      ) {
        otherIndex += 2
      }
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'content'
      ) {
        while (++otherIndex < subevents.length) {
          if (subevents[otherIndex][1].type === 'content') {
            break
          }
          if (subevents[otherIndex][1].type === 'chunkText') {
            subevents[otherIndex][1]._isInFirstContentOfListItem = true
            otherIndex++
          }
        }
      }
    }

    // Enter.
    if (event[0] === 'enter') {
      if (event[1].contentType) {
        Object.assign(jumps, subcontent(events, index))
        index = jumps[index]
        more = true
      }
    }
    // Exit.
    else if (event[1]._container) {
      otherIndex = index
      lineIndex = undefined
      while (otherIndex--) {
        otherEvent = events[otherIndex]
        if (
          otherEvent[1].type === 'lineEnding' ||
          otherEvent[1].type === 'lineEndingBlank'
        ) {
          if (otherEvent[0] === 'enter') {
            if (lineIndex) {
              events[lineIndex][1].type = 'lineEndingBlank'
            }
            otherEvent[1].type = 'lineEnding'
            lineIndex = otherIndex
          }
        } else {
          break
        }
      }
      if (lineIndex) {
        // Fix position.
        event[1].end = Object.assign({}, events[lineIndex][1].start)

        // Switch container exit w/ line endings.
        parameters = events.slice(lineIndex, index)
        parameters.unshift(event)
        splice(events, lineIndex, index - lineIndex + 1, parameters)
      }
    }
  }
  return !more
}

/**
 * Tokenize embedded tokens.
 *
 * @param {Array<Event>} events
 * @param {number} eventIndex
 * @returns {Record<string, number>}
 */
function subcontent(events, eventIndex) {
  const token = events[eventIndex][1]
  const context = events[eventIndex][2]
  let startPosition = eventIndex - 1
  /** @type {Array<number>} */
  const startPositions = []
  const tokenizer =
    token._tokenizer || context.parser[token.contentType](token.start)
  const childEvents = tokenizer.events
  /** @type {Array<[number, number]>} */
  const jumps = []
  /** @type {Record<string, number>} */
  const gaps = {}
  /** @type {Array<Chunk>} */
  let stream
  /** @type {Token | undefined} */
  let previous
  let index = -1
  /** @type {Token | undefined} */
  let current = token
  let adjust = 0
  let start = 0
  const breaks = [start]

  // Loop forward through the linked tokens to pass them in order to the
  // subtokenizer.
  while (current) {
    // Find the position of the event for this token.
    while (events[++startPosition][1] !== current) {
      // Empty.
    }
    startPositions.push(startPosition)
    if (!current._tokenizer) {
      stream = context.sliceStream(current)
      if (!current.next) {
        stream.push(null)
      }
      if (previous) {
        tokenizer.defineSkip(current.start)
      }
      if (current._isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = true
      }
      tokenizer.write(stream)
      if (current._isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = undefined
      }
    }

    // Unravel the next token.
    previous = current
    current = current.next
  }

  // Now, loop back through all events (and linked tokens), to figure out which
  // parts belong where.
  current = token
  while (++index < childEvents.length) {
    if (
      // Find a void token that includes a break.
      childEvents[index][0] === 'exit' &&
      childEvents[index - 1][0] === 'enter' &&
      childEvents[index][1].type === childEvents[index - 1][1].type &&
      childEvents[index][1].start.line !== childEvents[index][1].end.line
    ) {
      start = index + 1
      breaks.push(start)
      // Help GC.
      current._tokenizer = undefined
      current.previous = undefined
      current = current.next
    }
  }

  // Help GC.
  tokenizer.events = []

  // If there’s one more token (which is the cases for lines that end in an
  // EOF), that’s perfect: the last point we found starts it.
  // If there isn’t then make sure any remaining content is added to it.
  if (current) {
    // Help GC.
    current._tokenizer = undefined
    current.previous = undefined
  } else {
    breaks.pop()
  }

  // Now splice the events from the subtokenizer into the current events,
  // moving back to front so that splice indices aren’t affected.
  index = breaks.length
  while (index--) {
    const slice = childEvents.slice(breaks[index], breaks[index + 1])
    const start = startPositions.pop()
    jumps.unshift([start, start + slice.length - 1])
    splice(events, start, 2, slice)
  }
  index = -1
  while (++index < jumps.length) {
    gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
    adjust += jumps[index][1] - jumps[index][0] - 1
  }
  return gaps
}
node_modules/micromark-util-subtokenize/node_modules/.bin/uvu (generated, vendored, symbolic link, 1 line added)
@@ -0,0 +1 @@
../../../uvu/bin.js
node_modules/micromark-util-subtokenize/package.json (generated, vendored, normal file, 58 lines added)
@@ -0,0 +1,58 @@
{
  "name": "micromark-util-subtokenize",
  "version": "1.1.0",
  "description": "micromark utility to tokenize subtokens",
  "license": "MIT",
  "keywords": [
    "micromark",
    "util",
    "utility",
    "tokenize"
  ],
  "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-subtokenize",
  "bugs": "https://github.com/micromark/micromark/issues",
  "funding": [
    {
      "type": "GitHub Sponsors",
      "url": "https://github.com/sponsors/unifiedjs"
    },
    {
      "type": "OpenCollective",
      "url": "https://opencollective.com/unified"
    }
  ],
  "author": "Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)",
  "contributors": [
    "Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)"
  ],
  "sideEffects": false,
  "type": "module",
  "main": "index.js",
  "types": "dev/index.d.ts",
  "files": [
    "dev/",
    "index.d.ts",
    "index.js"
  ],
  "exports": {
    "types": "./dev/index.d.ts",
    "development": "./dev/index.js",
    "default": "./index.js"
  },
  "dependencies": {
    "micromark-util-chunked": "^1.0.0",
    "micromark-util-symbol": "^1.0.0",
    "micromark-util-types": "^1.0.0",
    "uvu": "^0.5.0"
  },
  "scripts": {
    "build": "micromark-build"
  },
  "xo": false,
  "typeCoverage": {
    "atLeast": 100,
    "detail": true,
    "strict": true,
    "ignoreCatch": true
  }
}
node_modules/micromark-util-subtokenize/readme.md (generated, vendored, normal file, 179 lines added)
@@ -0,0 +1,179 @@
# micromark-util-subtokenize

[![Build][build-badge]][build]
[![Coverage][coverage-badge]][coverage]
[![Downloads][downloads-badge]][downloads]
[![Size][bundle-size-badge]][bundle-size]
[![Sponsors][sponsors-badge]][opencollective]
[![Backers][backers-badge]][opencollective]
[![Chat][chat-badge]][chat]

[micromark][] utility to tokenize subtokens.

## Contents

* [What is this?](#what-is-this)
* [When should I use this?](#when-should-i-use-this)
* [Install](#install)
* [Use](#use)
* [API](#api)
    * [`subtokenize(events)`](#subtokenizeevents)
* [Types](#types)
* [Compatibility](#compatibility)
* [Security](#security)
* [Contribute](#contribute)
* [License](#license)

## What is this?

This package exposes a micromark internal that you probably don’t need.

## When should I use this?

This package might be useful when you are making your own micromark extensions.

## Install

This package is [ESM only][esm].
In Node.js (version 16+), install with [npm][]:

```sh
npm install micromark-util-subtokenize
```

In Deno with [`esm.sh`][esmsh]:

```js
import {subtokenize} from 'https://esm.sh/micromark-util-subtokenize@1'
```

In browsers with [`esm.sh`][esmsh]:

```html
<script type="module">
  import {subtokenize} from 'https://esm.sh/micromark-util-subtokenize@1?bundle'
</script>
```

## Use

```js
import {subtokenize} from 'micromark-util-subtokenize'

/**
 * Content is transparent: it’s parsed right now. That way, definitions are also
 * parsed right now: before text in paragraphs (specifically, media) are parsed.
 *
 * @type {Resolver}
 */
function resolveContent(events) {
  subtokenize(events)
  return events
}
```

## API

This module exports the identifiers [`subtokenize`][api-subtokenize].
There is no default export.

### `subtokenize(events)`

Tokenize subcontent.

###### Parameters

* `events` (`Array<Event>`)
    — list of events

###### Returns

Whether subtokens were found (`boolean`).
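
A minimal usage sketch of driving `subtokenize` to completion. This is illustrative, not text from the vendored file: the helper name `expandAll` is made up, and it assumes the return value signals that a pass found nothing further to expand (micromark itself runs an equivalent loop in its postprocessing step).

```js
import {subtokenize} from 'micromark-util-subtokenize'

/**
 * Keep expanding subcontent in `events` until a pass finds nothing left to
 * expand. Each pass splices freshly tokenized subevents into the array.
 */
function expandAll(events) {
  while (!subtokenize(events)) {
    // Empty: another pass is needed.
  }

  return events
}
```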

## Types

This package is fully typed with [TypeScript][].
It exports no additional types.

## Compatibility

Projects maintained by the unified collective are compatible with all maintained
versions of Node.js.
As of now, that is Node.js 16+.
Our projects sometimes work with older versions, but this is not guaranteed.

This package works with `micromark` version 3+.

## Security

This package is safe.
See [`security.md`][securitymd] in [`micromark/.github`][health] for how to
submit a security report.

## Contribute

See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways
to get started.
See [`support.md`][support] for ways to get help.

This project has a [code of conduct][coc].
By interacting with this repository, organisation, or community you agree to
abide by its terms.

## License

[MIT][license] © [Titus Wormer][author]

<!-- Definitions -->

[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg

[build]: https://github.com/micromark/micromark/actions

[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg

[coverage]: https://codecov.io/github/micromark/micromark

[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-subtokenize.svg

[downloads]: https://www.npmjs.com/package/micromark-util-subtokenize

[bundle-size-badge]: https://img.shields.io/badge/dynamic/json?label=minzipped%20size&query=$.size.compressedSize&url=https://deno.bundlejs.com/?q=micromark-util-subtokenize

[bundle-size]: https://bundlejs.com/?q=micromark-util-subtokenize

[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg

[backers-badge]: https://opencollective.com/unified/backers/badge.svg

[opencollective]: https://opencollective.com/unified

[npm]: https://docs.npmjs.com/cli/install

[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c

[esmsh]: https://esm.sh

[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg

[chat]: https://github.com/micromark/micromark/discussions

[license]: https://github.com/micromark/micromark/blob/main/license

[author]: https://wooorm.com

[health]: https://github.com/micromark/.github

[securitymd]: https://github.com/micromark/.github/blob/main/security.md

[contributing]: https://github.com/micromark/.github/blob/main/contributing.md

[support]: https://github.com/micromark/.github/blob/main/support.md

[coc]: https://github.com/micromark/.github/blob/main/code-of-conduct.md

[typescript]: https://www.typescriptlang.org

[micromark]: https://github.com/micromark/micromark

[api-subtokenize]: #subtokenizeevents