mirror of https://github.com/helix-editor/helix
feat: continue comment uses injected comment tokens
parent 5b30bfe36e
commit c72755437a
@@ -11,19 +11,56 @@ use std::borrow::Cow;
 pub const DEFAULT_COMMENT_TOKEN: &str = "#";
 
 /// Returns the longest matching comment token of the given line (if it exists).
-pub fn get_comment_token<'a, S: AsRef<str>>(
+pub fn get_comment_token(
+    syntax: Option<&Syntax>,
     text: RopeSlice,
-    tokens: &'a [S],
+    doc_default_tokens: Option<&Vec<String>>,
     line_num: usize,
-) -> Option<&'a str> {
+) -> Option<String> {
     let line = text.line(line_num);
     let start = line.first_non_whitespace_char()?;
+    let start_char = text.line_to_char(line_num) + start;
 
-    tokens
-        .iter()
-        .map(AsRef::as_ref)
-        .filter(|token| line.slice(start..).starts_with(token))
-        .max_by_key(|token| token.len())
+    let injected_tokens = get_injected_tokens(syntax, start_char, start_char)
+        // we don't care about block comment tokens
+        .0
+        .and_then(|tokens| {
+            tokens
+                .into_iter()
+                .filter(|token| line.slice(start..).starts_with(token))
+                .max_by_key(|token| token.len())
+        });
+
+    injected_tokens.or(
+        // no comment tokens found for injection. Use doc tokens instead
+        doc_default_tokens.and_then(|tokens| {
+            tokens
+                .iter()
+                .filter(|token| line.slice(start..).starts_with(token))
+                .max_by_key(|token| token.len())
+                .cloned()
+        }),
+    )
 }
 
+pub fn get_injected_tokens(
+    syntax: Option<&Syntax>,
+    start: usize,
+    end: usize,
+) -> (Option<Vec<String>>, Option<Vec<BlockCommentToken>>) {
+    // Find the injection with the most tightly encompassing range.
+    syntax
+        .and_then(|syntax| {
+            injection_for_range(syntax, start, end)
+                .map(|language_id| syntax.layer_config(language_id))
+                .map(|config| {
+                    (
+                        config.comment_tokens.clone(),
+                        config.block_comment_tokens.clone(),
+                    )
+                })
+        })
+        .unwrap_or_default()
+}
+
 /// For a given range in the document, get the most tightly encompassing
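A minimal sketch of how a caller might use the new signature, in the same style as the tests further down (imports and helpers assumed from that module; with `syntax` passed as `None`, the injected-token lookup is skipped and only the document-default tokens are consulted):

    // Illustrative only: no syntax tree, so the injected-token lookup yields
    // nothing and the document-default tokens act as the fallback.
    let text = Rope::from("// continue me");
    let tokens = vec!["//".to_owned(), "///".to_owned()];
    assert_eq!(
        get_comment_token(None, text.slice(..), Some(&tokens), 0),
        Some("//".to_owned())
    );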
@@ -58,26 +95,6 @@ pub fn injection_for_range(syntax: &Syntax, from: usize, to: usize) -> Option<La
     best_fit
 }
 
-pub fn get_injected_tokens(
-    syntax: Option<&Syntax>,
-    start: usize,
-    end: usize,
-) -> (Option<Vec<String>>, Option<Vec<BlockCommentToken>>) {
-    // Find the injection with the most tightly encompassing range.
-    syntax
-        .and_then(|syntax| {
-            injection_for_range(syntax, start, end)
-                .map(|language_id| syntax.layer_config(language_id))
-                .map(|config| {
-                    (
-                        config.comment_tokens.clone(),
-                        config.block_comment_tokens.clone(),
-                    )
-                })
-        })
-        .unwrap_or_default()
-}
-
 /// Given text, a comment token, and a set of line indices, returns the following:
 /// - Whether the given lines should be considered commented
 ///     - If any of the lines are uncommented, all lines are considered as such.
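This hunk only relocates `get_injected_tokens` above `injection_for_range`; the body is unchanged. Since it returns a `(line tokens, block tokens)` tuple, `get_comment_token` takes just the first element via `.0`. A hedged sketch of a caller that wants both halves (the `syntax` and `pos` bindings here are assumed to be in scope):

    // Either half may be None when the injected language (or the document
    // itself) defines no comment tokens of that kind.
    let (line_tokens, block_tokens) = get_injected_tokens(syntax, pos, pos);
    let first_line_token = line_tokens.as_ref().and_then(|tokens| tokens.first());
    let first_block_token = block_tokens.as_ref().and_then(|tokens| tokens.first());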
@@ -599,10 +616,10 @@ mod test {
     #[test]
     fn test_get_comment_with_char_boundaries() {
         let rope = Rope::from("··");
-        let tokens = ["//", "///"];
+        let tokens = vec!["//".to_owned(), "///".to_owned()];
 
         assert_eq!(
-            super::get_comment_token(rope.slice(..), tokens.as_slice(), 0),
+            super::get_comment_token(None, rope.slice(..), Some(&tokens), 0),
             None
         );
     }
@@ -614,11 +631,11 @@ mod test {
     #[test]
     fn test_use_longest_comment() {
         let text = Rope::from(" /// amogus ඞ");
-        let tokens = ["///", "//"];
+        let tokens = vec!["///".to_owned(), "//".to_owned()];
 
         assert_eq!(
-            super::get_comment_token(text.slice(..), tokens.as_slice(), 0),
-            Some("///")
+            super::get_comment_token(None, text.slice(..), Some(&tokens), 0),
+            Some("///".to_owned())
         );
     }
 }
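Not part of this commit, but a sketch of how the document-default fallback could be exercised in the same style as the tests above: with `syntax = None`, the longest matching default token wins.

    #[test]
    fn test_doc_default_fallback_sketch() {
        // Hypothetical test, not in the commit: no syntax tree is supplied,
        // so the longest matching document-default token is returned.
        let text = Rope::from("### heading-style comment");
        let tokens = vec!["#".to_owned(), "##".to_owned()];

        assert_eq!(
            super::get_comment_token(None, text.slice(..), Some(&tokens), 0),
            Some("##".to_owned())
        );
    }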
@@ -3623,13 +3623,14 @@ fn open(cx: &mut Context, open: Open, comment_continuation: CommentContinuation)
 
     let mut ranges = SmallVec::with_capacity(selection.len());
 
-    let continue_comment_tokens =
-        if comment_continuation == CommentContinuation::Enabled && config.continue_comments {
-            doc.language_config()
-                .and_then(|config| config.comment_tokens.as_ref())
-        } else {
-            None
-        };
+    let continue_comments =
+        comment_continuation == CommentContinuation::Enabled && config.continue_comments;
+
+    let doc_default_tokens = doc
+        .language_config()
+        .and_then(|config| config.comment_tokens.as_ref());
+
+    let syntax = doc.syntax();
 
     let mut transaction = Transaction::change_by_selection(contents, selection, |range| {
         // the line number, where the cursor is currently
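In `open`, the gate moves from the token source to the result: the comment token is now looked up unconditionally and then dropped with `Option::filter` when continuation is disabled (see the next hunk). A stand-alone illustration of that pattern, with made-up bindings:

    // When the flag is off, the looked-up token is discarded here, so the
    // insertion code below still sees `None` exactly as before.
    let continue_comments = false;
    let looked_up: Option<String> = Some("//".to_owned());
    assert_eq!(looked_up.filter(|_| continue_comments), None);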
@@ -3646,8 +3647,9 @@ fn open(cx: &mut Context, open: Open, comment_continuation: CommentContinuation)
 
         let above_next_new_line_num = next_new_line_num.saturating_sub(1);
 
-        let continue_comment_token = continue_comment_tokens
-            .and_then(|tokens| comment::get_comment_token(text, tokens, curr_line_num));
+        let continue_comment_token =
+            comment::get_comment_token(syntax, text, doc_default_tokens, curr_line_num)
+                .filter(|_| continue_comments);
 
         // Index to insert newlines after, as well as the char width
         // to use to compensate for those inserted newlines.
@@ -3681,7 +3683,7 @@ fn open(cx: &mut Context, open: Open, comment_continuation: CommentContinuation)
 
         if open == Open::Above && next_new_line_num == 0 {
             text.push_str(&indent);
-            if let Some(token) = continue_comment_token {
+            if let Some(ref token) = continue_comment_token {
                 text.push_str(token);
                 text.push(' ');
             }
@@ -3690,7 +3692,7 @@ fn open(cx: &mut Context, open: Open, comment_continuation: CommentContinuation)
             text.push_str(doc.line_ending.as_str());
             text.push_str(&indent);
 
-            if let Some(token) = continue_comment_token {
+            if let Some(ref token) = continue_comment_token {
                 text.push_str(token);
                 text.push(' ');
             }
@@ -4139,12 +4141,11 @@ pub mod insert {
         let mut global_offs = 0;
         let mut new_text = String::new();
 
-        let continue_comment_tokens = if config.continue_comments {
-            doc.language_config()
-                .and_then(|config| config.comment_tokens.as_ref())
-        } else {
-            None
-        };
+        let doc_default_comment_token = doc
+            .language_config()
+            .and_then(|config| config.comment_tokens.as_ref());
+
+        let syntax = doc.syntax();
 
         let mut transaction = Transaction::change_by_selection(contents, selection, |range| {
             // Tracks the number of trailing whitespace characters deleted by this selection.
@@ -4161,8 +4162,9 @@ pub mod insert {
             let current_line = text.char_to_line(pos);
             let line_start = text.line_to_char(current_line);
 
-            let continue_comment_token = continue_comment_tokens
-                .and_then(|tokens| comment::get_comment_token(text, tokens, current_line));
+            let continue_comment_token =
+                comment::get_comment_token(syntax, text, doc_default_comment_token, current_line)
+                    .filter(|_| config.continue_comments);
 
             let (from, to, local_offs) = if let Some(idx) =
                 text.slice(line_start..pos).last_non_whitespace_char()
@@ -4197,7 +4199,7 @@ pub mod insert {
                 new_text.reserve_exact(line_ending.len() + indent.len() + token.len() + 1);
                 new_text.push_str(line_ending);
                 new_text.push_str(&indent);
-                new_text.push_str(token);
+                new_text.push_str(&token);
                 new_text.push(' ');
                 new_text.chars().count()
             } else if on_auto_pair {
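The last change follows from the new return type: `token` bound here appears to be an owned `String` rather than a `&str`, so it is passed as `&token`, which deref-coerces to `&str` for `push_str`. A minimal illustration:

    let token = String::from("//");
    let mut new_text = String::new();
    // `&token` is a `&String`; it deref-coerces to `&str` for `push_str`.
    new_text.push_str(&token);
    new_text.push(' ');
    assert_eq!(new_text, "// ");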