//! This module contains the functionality to toggle comments on lines over the selection
//! using the comment character defined in the user's `languages.toml`

use smallvec::SmallVec;

use crate::{syntax::BlockCommentToken, Change, Range, Rope, RopeSlice, Tendril};
use helix_stdx::rope::RopeSliceExt;

use std::borrow::Cow;

pub const DEFAULT_COMMENT_TOKEN: &str = "#";

/// Returns the longest matching comment token of the given line (if it exists).
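///
/// A minimal sketch of the expected behaviour (illustrative example, not taken from the
/// tests below):
///
/// ```ignore
/// let text = Rope::from("  // a commented line\n");
/// let tokens = ["//", "///"];
/// assert_eq!(get_comment_token(text.slice(..), &tokens, 0), Some("//"));
/// ```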
pub fn get_comment_token<'a, S: AsRef<str>>(
    text: RopeSlice,
    tokens: &'a [S],
    line_num: usize,
) -> Option<&'a str> {
    let line = text.line(line_num);
    let start = line.first_non_whitespace_char()?;

    tokens
        .iter()
        .map(AsRef::as_ref)
        .filter(|token| line.slice(start..).starts_with(token))
        .max_by_key(|token| token.len())
}

/// Given text, a comment token, and a set of line indices, returns the following:
/// - Whether the given lines should be considered commented
///     - If any of the lines are uncommented, all lines are considered as such.
/// - The lines to change for toggling comments
///     - This is all provided lines excluding blank lines.
/// - The column of the comment tokens
///     - Column of existing tokens, if the lines are commented; column to place tokens at otherwise.
/// - The margin to the right of the comment tokens
///     - Defaults to `1`. If any existing comment token is not followed by a space, changes to `0`.
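///
/// For example (mirroring the `not_commented` test below), calling this with token `"//"`
/// over lines `0..3` of `"  1\n\n  2\n  3"` returns `(false, vec![0, 2], 2, 0)`: the block
/// is uncommented, the blank line 1 is skipped, the minimum indentation column is 2, and
/// the margin is 0.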
fn find_line_comment(
    token: &str,
    text: RopeSlice,
    lines: impl IntoIterator<Item = usize>,
) -> (bool, Vec<usize>, usize, usize) {
    let mut commented = true;
    let mut to_change = Vec::new();
    let mut min = usize::MAX; // minimum col for first_non_whitespace_char
    let mut margin = 1;
    let token_len = token.chars().count();

    for line in lines {
        let line_slice = text.line(line);
        if let Some(pos) = line_slice.first_non_whitespace_char() {
            let len = line_slice.len_chars();

            min = std::cmp::min(min, pos);

            // line can be shorter than pos + token len
            let fragment = Cow::from(line_slice.slice(pos..std::cmp::min(pos + token.len(), len)));

            // as soon as one of the non-blank lines doesn't have a comment, the whole block is
            // considered uncommented.
            if fragment != token {
                commented = false;
            }

            // determine margin of 0 or 1 for uncommenting; if any comment token is not followed by a space,
            // a margin of 0 is used for all lines.
            if !matches!(line_slice.get_char(pos + token_len), Some(c) if c == ' ') {
                margin = 0;
            }

            // blank lines don't get pushed.
            to_change.push(line);
        }
    }

    (commented, to_change, min, margin)
}

// For a given range and syntax, determine whether there are additional (injected) comment tokens to consider.
pub type GetInjectedTokens<'a> =
    Box<dyn FnMut(usize, usize) -> (Option<Vec<String>>, Option<Vec<BlockCommentToken>>) + 'a>;
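
// A hypothetical value of `GetInjectedTokens` could be built from a boxed closure
// (illustrative sketch only; the interpretation of the two `usize` arguments as a
// start/end position is an assumption, not taken from this module):
//
//     let no_injections: GetInjectedTokens = Box::new(|_start, _end| (None, None));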

#[must_use]
pub fn toggle_line_comments(doc: &Rope, range: &Range, token: Option<&str>) -> Vec<Change> {
    let text = doc.slice(..);

    let token = token.unwrap_or(DEFAULT_COMMENT_TOKEN);
    let comment = Tendril::from(format!("{} ", token));

    let start = text.char_to_line(range.from());
    let end = text.char_to_line(range.to());

    let start = start.clamp(0, text.len_lines());
    let end = (end + 1).min(text.len_lines());

    let mut lines = vec![];
    lines.extend(start..end);

    let (commented, to_change, min, margin) = find_line_comment(token, text, lines);

    let mut changes: Vec<Change> = Vec::with_capacity(to_change.len());

    for line in to_change {
        let pos = text.line_to_char(line) + min;

        if !commented {
            // comment line
            changes.push((pos, pos, Some(comment.clone())));
        } else {
            // uncomment line
            changes.push((pos, pos + token.len() + margin, None));
        }
    }

    changes
}
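
// A minimal usage sketch for `toggle_line_comments` above (mirroring the `comment` test
// in the test module; the returned changes are applied through a `Transaction`):
//
//     let doc = Rope::from("  1\n\n  2\n  3");
//     let range = Range::new(0, doc.len_chars() - 1);
//     let changes = toggle_line_comments(&doc, &range, None);
//     // applying `changes` yields "  # 1\n\n  # 2\n  # 3"; toggling again removes the tokens.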

#[derive(Debug, PartialEq, Eq)]
pub enum CommentChange {
    Commented {
        range: Range,
        start_pos: usize,
        end_pos: usize,
        start_margin: bool,
        end_margin: bool,
        start_token: String,
        end_token: String,
    },
    Uncommented {
        range: Range,
        start_pos: usize,
        end_pos: usize,
        start_token: String,
        end_token: String,
    },
    Whitespace {
        range: Range,
    },
}

pub fn find_block_comments(
    tokens: &[BlockCommentToken],
    text: RopeSlice,
    ranges: &Vec<Range>,
) -> (bool, Vec<CommentChange>) {
    let mut commented = true;
    let mut only_whitespace = true;
    let mut comment_changes = Vec::with_capacity(ranges.len());
    let default_tokens = tokens.first().cloned().unwrap_or_default();
    let mut start_token = default_tokens.start.clone();
    let mut end_token = default_tokens.end.clone();

    let mut tokens = tokens.to_vec();
    // sort the tokens by length, so longer tokens will match first
    tokens.sort_by(|a, b| {
        if a.start.len() == b.start.len() {
            b.end.len().cmp(&a.end.len())
        } else {
            b.start.len().cmp(&a.start.len())
        }
    });
    for range in ranges {
        let selection_slice = range.slice(text);
        if let (Some(start_pos), Some(end_pos)) = (
            selection_slice.first_non_whitespace_char(),
            selection_slice.last_non_whitespace_char(),
        ) {
            let mut line_commented = false;
            let mut after_start = 0;
            let mut before_end = 0;
            let len = (end_pos + 1) - start_pos;

            for BlockCommentToken { start, end } in &tokens {
                let start_len = start.chars().count();
                let end_len = end.chars().count();
                after_start = start_pos + start_len;
                before_end = end_pos.saturating_sub(end_len);

                if len >= start_len + end_len {
                    let start_fragment = selection_slice.slice(start_pos..after_start);
                    let end_fragment = selection_slice.slice(before_end + 1..end_pos + 1);

                    // block commented with these tokens
                    if start_fragment == start.as_str() && end_fragment == end.as_str() {
                        start_token = start.to_string();
                        end_token = end.to_string();
                        line_commented = true;
                        break;
                    }
                }
            }

            if !line_commented {
                comment_changes.push(CommentChange::Uncommented {
                    range: *range,
                    start_pos,
                    end_pos,
                    start_token: default_tokens.start.clone(),
                    end_token: default_tokens.end.clone(),
                });
                commented = false;
            } else {
                comment_changes.push(CommentChange::Commented {
                    range: *range,
                    start_pos,
                    end_pos,
                    start_margin: selection_slice.get_char(after_start) == Some(' '),
                    end_margin: after_start != before_end
                        && (selection_slice.get_char(before_end) == Some(' ')),
                    start_token: start_token.to_string(),
                    end_token: end_token.to_string(),
                });
            }
            only_whitespace = false;
        } else {
            comment_changes.push(CommentChange::Whitespace { range: *range });
        }
    }
    if only_whitespace {
        commented = false;
    }
    (commented, comment_changes)
}
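
// For example (mirroring `test_find_block_comments` in the test module), calling
// `find_block_comments` above on `"1\n2\n3"` with the default `/* ... */` token and a
// selection covering the whole document returns
// `(false, vec![CommentChange::Uncommented { range: Range::new(0, 5), start_pos: 0,
// end_pos: 4, start_token: "/*".into(), end_token: "*/".into() }])`.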

#[must_use]
pub fn create_block_comment_transaction(
    _doc: &Rope,
    ranges: &[Range],
    was_commented: bool,
    comment_changes: Vec<CommentChange>,
) -> (Vec<Change>, SmallVec<[Range; 1]>) {
    let mut changes: Vec<Change> = Vec::with_capacity(ranges.len() * 2);
    let mut ranges: SmallVec<[Range; 1]> = SmallVec::with_capacity(ranges.len());
    let mut offs = 0;

    for change in comment_changes {
        if was_commented {
            if let CommentChange::Commented {
                range,
                start_pos,
                end_pos,
                start_token,
                end_token,
                start_margin,
                end_margin,
            } = change
            {
                let from = range.from();
                let keep_from = from + start_pos + start_token.len() + start_margin as usize;
                changes.push((from + start_pos, keep_from, None));
                let keep_until = from + end_pos - end_token.len() - end_margin as usize + 1;
                changes.push((keep_until, from + end_pos + 1, None));
                // The range of characters keep_from..keep_until remains in the document.
                ranges.push(Range::new(keep_from, keep_until).with_direction(range.direction()));
            }
        } else {
            // uncommented, so manually map ranges through changes
            match change {
                CommentChange::Uncommented {
                    range,
                    start_pos,
                    end_pos,
                    start_token,
                    end_token,
                } => {
                    let from = range.from();
                    changes.push((
                        from + start_pos,
                        from + start_pos,
                        Some(Tendril::from(format!("{} ", start_token))),
                    ));
                    changes.push((
                        from + end_pos + 1,
                        from + end_pos + 1,
                        Some(Tendril::from(format!(" {}", end_token))),
                    ));

                    let offset = start_token.chars().count() + end_token.chars().count() + 2;
                    ranges.push(
                        Range::new(from + offs, from + offs + end_pos + 1 + offset)
                            .with_direction(range.direction()),
                    );
                    offs += offset;
                }
                CommentChange::Commented { range, .. } | CommentChange::Whitespace { range } => {
                    ranges.push(Range::new(range.from() + offs, range.to() + offs));
                }
            }
        }
    }

    (changes, ranges)
}

#[must_use]
pub fn toggle_block_comments(
    doc: &Rope,
    ranges: &Vec<Range>,
    tokens: &[BlockCommentToken],
    selections: &mut SmallVec<[Range; 1]>,
    added_chars: &mut isize,
) -> Vec<Change> {
    let text = doc.slice(..);
    let (was_commented, comment_changes) = find_block_comments(tokens, text, ranges);
    let (changes, new_ranges) =
        create_block_comment_transaction(doc, ranges, was_commented, comment_changes);

    if was_commented {
        for (i, range) in new_ranges.iter().enumerate() {
            // every 2 elements (from, to) in `changes` correspond
            // to 1 element in `new_ranges`;
            // the `from` - `to` represents the range of text that will be deleted.
            //
            // Left token:
            //
            // "<!-- "
            //  ^ left_from
            //       ^ left_to
            //
            // Right token:
            //
            // " -->"
            //  ^ right_from
            //      ^ right_to
            let (left_from, left_to, _) = changes[i * 2];
            let (right_from, right_to, _) = changes[i * 2 + 1];

            *added_chars -= left_to as isize - left_from as isize;

            // We slide the range to the left by the amount of characters
            // we've deleted so far + the amount of chars deleted for
            // the left comment token of the current iteration
            selections.push(Range::new(
                (range.anchor as isize + *added_chars).try_into().unwrap(),
                (range.head as isize + *added_chars).try_into().unwrap(),
            ));

            *added_chars -= right_to as isize - right_from as isize;
        }

        changes
    } else {
        // when we add comment tokens, we want to extend our selection to
        // also include the added tokens.
        for (i, range) in new_ranges.iter().enumerate() {
            // will not panic because we're never removing or
            // creating ranges, only shifting / increasing the size
            // of existing ranges to accommodate the newly added
            // comment tokens.
            let old_range = ranges[i];
            // Will not underflow because the new range must always be
            // at least the same size as the old range, since we're
            // adding comment token characters, never removing.
            let range = Range::new(
                range.anchor + *added_chars as usize,
                range.head + *added_chars as usize,
            );
            selections.push(range);
            *added_chars += range.len() as isize - old_range.len() as isize;
        }

        changes
    }
}
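
// A minimal usage sketch for `toggle_block_comments` above (mirroring
// `test_find_block_comments` in the test module): toggling over the whole of "1\n2\n3"
// with `BlockCommentToken::default()` produces changes that wrap the text as
// "/* 1\n2\n3 */"; toggling the result again removes the tokens.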

pub fn split_lines_of_range(text: RopeSlice, range: &Range) -> Vec<Range> {
    let mut ranges = vec![];
    let (line_start, line_end) = range.line_range(text.slice(..));
    let mut pos = text.line_to_char(line_start);
    for line in text.slice(pos..text.line_to_char(line_end + 1)).lines() {
        let start = pos;
        pos += line.len_chars();
        ranges.push(Range::new(start, pos));
    }
    ranges
}
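
// A minimal sketch of the expected behaviour of `split_lines_of_range` above
// (illustrative example, not taken from the tests): for `Rope::from("1\n2\n3")` and a
// range covering the whole document, the result is one range per line, roughly
// `[Range::new(0, 2), Range::new(2, 4), Range::new(4, 5)]`, where each range includes
// the line's trailing newline.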

#[cfg(test)]
mod test {
    use super::*;

    mod find_line_comment {
        use super::*;

        #[test]
        fn not_commented() {
            // four lines, two space indented, except for line 1 which is blank.
            let doc = Rope::from("  1\n\n  2\n  3");

            let text = doc.slice(..);

            let res = find_line_comment("//", text, 0..3);
            // (commented = false, to_change = [line 0, line 2], min = col 2, margin = 0)
            assert_eq!(res, (false, vec![0, 2], 2, 0));
        }

        #[test]
        fn is_commented() {
            // three lines where the second line is empty.
            let doc = Rope::from("// hello\n\n// there");

            let res = find_line_comment("//", doc.slice(..), 0..3);

            // (commented = true, to_change = [line 0, line 2], min = col 0, margin = 1)
            assert_eq!(res, (true, vec![0, 2], 0, 1));
        }
    }

    // TODO: account for uncommenting with uneven comment indentation
    mod toggle_line_comment {
        use crate::Transaction;

        use super::*;

        #[test]
        fn comment() {
            // four lines, two space indented, except for line 1 which is blank.
            let mut doc = Rope::from("  1\n\n  2\n  3");
            // select whole document
            let range = Range::new(0, doc.len_chars() - 1);

            let changes = toggle_line_comments(&doc, &range, None);
            let transaction = Transaction::change(&doc, changes.into_iter());

            transaction.apply(&mut doc);

            assert_eq!(doc, "  # 1\n\n  # 2\n  # 3");
        }

        #[test]
        fn uncomment() {
            let mut doc = Rope::from("  # 1\n\n  # 2\n  # 3");
            let mut range = Range::new(0, doc.len_chars() - 1);

            let changes = toggle_line_comments(&doc, &range, None);
            let transaction = Transaction::change(&doc, changes.into_iter());
            transaction.apply(&mut doc);
            range = range.map(transaction.changes());

            assert_eq!(doc, "  1\n\n  2\n  3");
            assert_eq!(range, range); // to ignore the selection unused warning
        }

        #[test]
        fn uncomment_0_margin_comments() {
            let mut doc = Rope::from("  #1\n\n  #2\n  #3");
            let mut range = Range::new(0, doc.len_chars() - 1);

            let changes = toggle_line_comments(&doc, &range, None);
            let transaction = Transaction::change(&doc, changes.into_iter());
            transaction.apply(&mut doc);
            range = range.map(transaction.changes());

            assert_eq!(doc, "  1\n\n  2\n  3");
            assert_eq!(range, range); // to ignore the selection unused warning
        }

        #[test]
        fn uncomment_0_margin_comments_with_no_space() {
            let mut doc = Rope::from("#");
            let mut range = Range::new(0, doc.len_chars() - 1);

            let changes = toggle_line_comments(&doc, &range, None);
            let transaction = Transaction::change(&doc, changes.into_iter());
            transaction.apply(&mut doc);
            range = range.map(transaction.changes());
            assert_eq!(doc, "");
            assert_eq!(range, range); // to ignore the selection unused warning
        }

        #[test]
        fn test_find_block_comments() {
            // three lines, 5 characters.
            let mut doc = Rope::from("1\n2\n3");
            // select whole document
            let range = Range::new(0, doc.len_chars());

            let text = doc.slice(..);

            let res = find_block_comments(&[BlockCommentToken::default()], text, &vec![range]);

            assert_eq!(
                res,
                (
                    false,
                    vec![CommentChange::Uncommented {
                        range: Range::new(0, 5),
                        start_pos: 0,
                        end_pos: 4,
                        start_token: "/*".to_string(),
                        end_token: "*/".to_string(),
                    }]
                )
            );

            // comment
            let changes = toggle_block_comments(
                &doc,
                &vec![range],
                &[BlockCommentToken::default()],
                &mut SmallVec::new(),
                &mut 0,
            );
            let transaction = Transaction::change(&doc, changes.into_iter());
            transaction.apply(&mut doc);

            assert_eq!(doc, "/* 1\n2\n3 */");

            // uncomment
            let range = Range::new(0, doc.len_chars());
            let changes = toggle_block_comments(
                &doc,
                &vec![range],
                &[BlockCommentToken::default()],
                &mut SmallVec::new(),
                &mut 0,
            );
            let transaction = Transaction::change(&doc, changes.into_iter());
            transaction.apply(&mut doc);
            assert_eq!(doc, "1\n2\n3");

            // don't panic when there is just a space in comment
            doc = Rope::from("/* */");
            let range = Range::new(0, doc.len_chars());
            let changes = toggle_block_comments(
                &doc,
                &vec![range],
                &[BlockCommentToken::default()],
                &mut SmallVec::new(),
                &mut 0,
            );
            let transaction = Transaction::change(&doc, changes.into_iter());
            transaction.apply(&mut doc);
            assert_eq!(doc, "");
        }

        /// Test that `get_comment_token` works even when the file content includes
        /// characters whose byte length differs from their char count.
        #[test]
        fn test_get_comment_with_char_boundaries() {
            let rope = Rope::from("··");
            let tokens = ["//", "///"];

            assert_eq!(
                super::get_comment_token(rope.slice(..), tokens.as_slice(), 0),
                None
            );
        }

        /// Test for `get_comment_token`.
        ///
        /// Assuming the comment tokens are stored as `["///", "//"]`, `get_comment_token` should still
        /// return `///` instead of `//` if the user is in a doc-comment section.
        #[test]
        fn test_use_longest_comment() {
            let text = Rope::from(" /// amogus ඞ");
            let tokens = ["///", "//"];

            assert_eq!(
                super::get_comment_token(text.slice(..), tokens.as_slice(), 0),
                Some("///")
            );
        }
    }
}