From 76687f53898e6bbc0e7b166bd822822734d918ef Mon Sep 17 00:00:00 2001
From: Nik Revenco <154856872+NikitaRevenco@users.noreply.github.com>
Date: Sun, 25 May 2025 00:46:07 +0100
Subject: [PATCH] refactor: Rename variables + Remove accidentally committed files

---
 helix-core/src/comment.rs  | 139 +++++++++++++++++--------------------
 helix-term/src/commands.rs |  32 +++++----
 index.html                 |   5 --
 xtask/src/querycheck.rs    |  39 -----------
 4 files changed, 84 insertions(+), 131 deletions(-)
 delete mode 100644 index.html
 delete mode 100644 xtask/src/querycheck.rs

diff --git a/helix-core/src/comment.rs b/helix-core/src/comment.rs
index 8cfd8b42f..5e5c58f8b 100644
--- a/helix-core/src/comment.rs
+++ b/helix-core/src/comment.rs
@@ -12,8 +12,8 @@ use std::borrow::Cow;
 
 pub const DEFAULT_COMMENT_TOKEN: &str = "#";
 
-/// Returns the longest matching comment token of the given line (if it exists).
-pub fn get_comment_token(
+/// Returns the longest matching line comment token of the given line (if it exists).
+pub fn get_line_comment_token(
     loader: &syntax::Loader,
     syntax: Option<&Syntax>,
     text: RopeSlice,
@@ -24,30 +24,32 @@ pub fn get_comment_token(
     let start = line.first_non_whitespace_char()?;
     let start_char = text.line_to_char(line_num) + start;
 
-    let injected_tokens = get_injected_tokens(loader, syntax, start_char as u32, start_char as u32)
-        // we only care about line comment tokens
-        .0
-        .and_then(|tokens| {
-            tokens
-                .into_iter()
-                .filter(|token| line.slice(start..).starts_with(token))
-                .max_by_key(|token| token.len())
-        });
+    let injected_line_comment_tokens =
+        injected_tokens_for_range(loader, syntax, start_char as u32, start_char as u32)
+            .0
+            .and_then(|tokens| {
+                tokens
+                    .into_iter()
+                    .filter(|token| line.slice(start..).starts_with(token))
+                    .max_by_key(|token| token.len())
+            });
 
-    injected_tokens.or(
-        // no comment tokens found for injection, use doc comments if exists
+    injected_line_comment_tokens.or_else(||
+        // no line comment tokens found for the injection, use doc comments if they exist
        doc_default_tokens.and_then(|tokens| {
            tokens
                .iter()
                .filter(|token| line.slice(start..).starts_with(token))
                .max_by_key(|token| token.len())
                .cloned()
-        }),
-    )
+        }))
 }
 
-/// Find the injection with the most tightly encompassing range.
-pub fn get_injected_tokens(
+/// Get the injected line and block comment tokens of the smallest
+/// injection around the range which fully includes `start..=end`.
+///
+/// Injections that do not have any comment tokens are skipped.
+pub fn injected_tokens_for_range(
     loader: &syntax::Loader,
     syntax: Option<&Syntax>,
     start: u32,
@@ -69,8 +71,8 @@ pub fn get_injected_tokens(
         // if the language does not have any comment tokens, it does not make
         // any sense to consider it.
         //
-        // This includes languages such as comment, jsdoc and regex: These
-        // languages are injected and never found in files by themselves
+        // This includes languages such as `comment`, `jsdoc` and `regex`.
+        // These languages are injected and never found in files by themselves.
         has_any_comment_tokens.then_some((
             lang_config.comment_tokens.clone(),
             lang_config.block_comment_tokens.clone(),
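
Note: both hunks above hinge on one selection rule: among the candidate comment tokens, keep only those that prefix the line's first non-whitespace text, then take the longest. A minimal standalone sketch of that rule on plain `&str` instead of `RopeSlice` (the `longest_matching_token` helper and the `main` harness are illustrative assumptions, not code from this patch):

/// Illustrative helper (not from the patch): pick the longest token that
/// prefixes the line once leading whitespace is skipped.
fn longest_matching_token<'t>(line: &str, tokens: &'t [String]) -> Option<&'t str> {
    let trimmed = line.trim_start();
    tokens
        .iter()
        .filter(|token| trimmed.starts_with(token.as_str()))
        .max_by_key(|token| token.len())
        .map(String::as_str)
}

fn main() {
    let tokens = vec!["//".to_owned(), "///".to_owned()];
    // "///" wins over "//" because both match and "///" is longer,
    // which is what keeps doc comments continuing as doc comments.
    assert_eq!(longest_matching_token("    /// docs", &tokens), Some("///"));
    assert_eq!(longest_matching_token("let x = 1;", &tokens), None);
}
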
@@ -80,19 +82,20 @@ pub fn get_injected_tokens(
     .unwrap_or_default()
 }
 
-/// Given text, a comment token, and a set of line indices, returns the following:
-/// - Whether the given lines should be considered commented
+/// Given `text`, a comment `token`, and a set of line indices `lines_to_modify`,
+/// returns the following:
+/// 1. Whether the given lines should be considered commented
 ///    - If any of the lines are uncommented, all lines are considered as such.
-/// - The lines to change for toggling comments
+/// 2. The lines to change for toggling comments
 ///    - This is all provided lines excluding blank lines.
-/// - The column of the comment tokens
+/// 3. The column of the comment tokens
 ///    - Column of existing tokens, if the lines are commented; column to place tokens at otherwise.
-/// - The margin to the right of the comment tokens
+/// 4. The margin to the right of the comment tokens
 ///    - Defaults to `1`. If any existing comment token is not followed by a space, changes to `0`.
 fn find_line_comment(
     token: &str,
     text: RopeSlice,
-    lines: impl IntoIterator,
+    lines_to_modify: impl IntoIterator,
 ) -> (bool, Vec, usize, usize) {
     let mut commented = true;
     let mut to_change = Vec::new();
@@ -100,7 +103,7 @@ fn find_line_comment(
     let mut margin = 1;
     let token_len = token.chars().count();
 
-    for line in lines {
+    for line in lines_to_modify {
         let line_slice = text.line(line);
         if let Some(pos) = line_slice.first_non_whitespace_char() {
             let len = line_slice.len_chars();
@@ -130,39 +133,55 @@ fn find_line_comment(
     (commented, to_change, min, margin)
 }
 
+/// Returns the edits required to toggle the comment `token` for the `range` in the `doc`.
 #[must_use]
 pub fn toggle_line_comments(doc: &Rope, range: &Range, token: Option<&str>) -> Vec {
     let text = doc.slice(..);
 
     let token = token.unwrap_or(DEFAULT_COMMENT_TOKEN);
+
+    // Add a space between the comment token and the line.
     let comment = Tendril::from(format!("{} ", token));
 
-    let start = text.char_to_line(range.from());
-    let end = text.char_to_line(range.to().saturating_sub(1));
     let line_count = text.len_lines();
-    let start = start.clamp(0, line_count);
-    let end = (end + 1).min(line_count);
 
-    let mut lines = vec![];
-    lines.extend(start..end);
+    let start = text.char_to_line(range.from()).clamp(0, line_count);
+    let end = (text.char_to_line(range.to().saturating_sub(1)) + 1).min(line_count);
 
-    let (was_commented, to_change, min, margin) = find_line_comment(token, text, lines);
+    let lines_to_modify = start..end;
 
-    let mut changes: Vec = Vec::with_capacity(to_change.len());
+    let (
+        was_commented,
+        lines_to_modify,
+        column_to_place_comment_tokens_at,
+        comment_tokens_right_margin,
+    ) = find_line_comment(token, text, lines_to_modify);
 
-    for line in to_change {
-        let pos = text.line_to_char(line) + min;
+    lines_to_modify
+        .into_iter()
+        .map(|line| {
+            let place_comment_tokens_at =
+                text.line_to_char(line) + column_to_place_comment_tokens_at;
 
-        if !was_commented {
-            // comment line
-            changes.push((pos, pos, Some(comment.clone())));
-        } else {
-            // uncomment line
-            changes.push((pos, pos + token.len() + margin, None));
-        }
-    }
-
-    changes
+            if !was_commented {
+                // comment line
+                (
+                    place_comment_tokens_at,
+                    place_comment_tokens_at,
+                    // insert the token
+                    Some(comment.clone()),
+                )
+            } else {
+                // uncomment line
+                (
+                    place_comment_tokens_at,
+                    place_comment_tokens_at + token.len() + comment_tokens_right_margin,
+                    // remove the token - replace range with nothing
+                    None,
+                )
+            }
+        })
+        .collect()
 }
 
 #[derive(Debug, PartialEq, Eq)]
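
Note: the contract between `find_line_comment` and the rewritten `toggle_line_comments` is easiest to see end to end. Below is a simplified, selection-free sketch of that contract on a plain string (not the patch's code): per-line columns stand in for the shared minimum column the real code computes, and the one-space margin is assumed rather than detected.

/// Simplified sketch (not the patch's code): toggle a line comment on a
/// plain string instead of producing rope edits.
fn toggle(text: &str, token: &str) -> String {
    // A block counts as commented only if every non-blank line is commented;
    // a single uncommented line turns the whole toggle into a "comment" pass.
    let commented = text
        .lines()
        .filter(|line| !line.trim().is_empty())
        .all(|line| line.trim_start().starts_with(token));

    let toggled: Vec<String> = text
        .lines()
        .map(|line| {
            if line.trim().is_empty() {
                return line.to_string(); // blank lines are never touched
            }
            let col = line.len() - line.trim_start().len();
            if commented {
                // Uncomment: drop the token plus the one-space margin, if present.
                let rest = &line[col + token.len()..];
                format!("{}{}", &line[..col], rest.strip_prefix(' ').unwrap_or(rest))
            } else {
                // Comment: insert "token + space" at the first non-whitespace column.
                format!("{}{} {}", &line[..col], token, &line[col..])
            }
        })
        .collect();

    let mut out = toggled.join("\n");
    if text.ends_with('\n') {
        out.push('\n');
    }
    out
}

fn main() {
    let src = "fn main() {\n\n    todo!()\n}\n";
    let commented = toggle(src, "//");
    assert_eq!(commented, "// fn main() {\n\n    // todo!()\n// }\n");
    // Toggling twice round-trips back to the original text.
    assert_eq!(toggle(&commented, "//"), src);
}
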
@@ -582,33 +601,5 @@ mod test {
         transaction.apply(&mut doc);
         assert_eq!(doc, "");
     }
-
-    // Test, if `get_comment_tokens` works, even if the content of the file includes chars, whose
-    // byte size unequal the amount of chars
-    // #[test]
-    // fn test_get_comment_with_char_boundaries() {
-    //     let rope = Rope::from("··");
-    //     let tokens = vec!["//".to_owned(), "///".to_owned()];
-
-    //     assert_eq!(
-    //         super::get_comment_token(None, rope.slice(..), Some(&tokens), 0),
-    //         None
-    //     );
-    // }
-
-    // /// Test for `get_comment_token`.
-    // ///
-    // /// Assuming the comment tokens are stored as `["///", "//"]`, `get_comment_token` should still
-    // /// return `///` instead of `//` if the user is in a doc-comment section.
-    // #[test]
-    // fn test_use_longest_comment() {
-    //     let text = Rope::from("    /// amogus ඞ");
-    //     let tokens = vec!["///".to_owned(), "//".to_owned()];
-
-    //     assert_eq!(
-    //         super::get_comment_token(None, text.slice(..), Some(&tokens), 0),
-    //         Some("///".to_owned())
-    //     );
-    // }
 }
 }
diff --git a/helix-term/src/commands.rs b/helix-term/src/commands.rs
index 31a28f6bb..22208c0f5 100644
--- a/helix-term/src/commands.rs
+++ b/helix-term/src/commands.rs
@@ -3671,9 +3671,14 @@ fn open(cx: &mut Context, open: Open, comment_continuation: CommentContinuation)
 
     let above_next_new_line_num = next_new_line_num.saturating_sub(1);
 
-    let continue_comment_token =
-        comment::get_comment_token(&loader, syntax, text, doc_default_tokens, curr_line_num)
-            .filter(|_| continue_comments);
+    let continue_comment_token = comment::get_line_comment_token(
+        &loader,
+        syntax,
+        text,
+        doc_default_tokens,
+        curr_line_num,
+    )
+    .filter(|_| continue_comments);
 
     // Index to insert newlines after, as well as the char width
     // to use to compensate for those inserted newlines.
@@ -4225,7 +4230,7 @@ pub mod insert {
         let current_line = text.char_to_line(pos);
         let line_start = text.line_to_char(current_line);
 
-        let continue_comment_token = comment::get_comment_token(
+        let continue_comment_token = comment::get_line_comment_token(
             &doc.syn_loader.load(),
             syntax,
             text,
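
Note: the two call sites above feed `get_line_comment_token` into comment continuation: when a new line is opened under a commented line, the new line is seeded with the same token. A plain-string sketch of that idea (the `continue_comment` helper and its harness are illustrative assumptions, not code from this patch):

/// Illustrative sketch (not the patch's code) of comment continuation:
/// compute the text a freshly opened line should start with.
fn continue_comment(current_line: &str, tokens: &[&str]) -> String {
    // Keep the current line's indentation on the new line.
    let indent: String = current_line
        .chars()
        .take_while(|c| c.is_whitespace())
        .collect();
    let trimmed = current_line.trim_start();
    // Reuse the longest matching token, mirroring `get_line_comment_token`.
    match tokens
        .iter()
        .copied()
        .filter(|token| trimmed.starts_with(*token))
        .max_by_key(|token| token.len())
    {
        Some(token) => format!("{indent}{token} "),
        None => indent,
    }
}

fn main() {
    // Opening a line under a doc comment continues the doc comment...
    assert_eq!(continue_comment("    /// docs", &["//", "///"]), "    /// ");
    // ...while ordinary code lines only keep their indentation.
    assert_eq!(continue_comment("    let x = 1;", &["//", "///"]), "    ");
}
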
@@ -5164,12 +5169,13 @@ pub fn completion(cx: &mut Context) {
 
 // comments
 
-/// commenting behavior, for each range in selection:
-/// 1. only line comment tokens -> line comment
-/// 2. each line block commented -> uncomment all lines
-/// 3. whole selection block commented -> uncomment selection
-/// 4. all lines not commented and block tokens -> comment uncommented lines
-/// 5. no comment tokens and not block commented -> line comment
+/// Commenting behavior, for each range in selection:
+///
+/// 1. Only line comment tokens -> line comment
+/// 2. Each line block commented -> uncomment all lines
+/// 3. Whole selection block commented -> uncomment selection
+/// 4. All lines not commented and block tokens -> comment uncommented lines
+/// 5. No comment tokens and not block commented -> line comment
 fn toggle_comments_impl(cx: &mut Context, comments_transaction: F)
 where
     F: Fn(
@@ -5219,7 +5225,7 @@ fn toggle_comments(cx: &mut Context) {
         rope,
         selection.iter().flat_map(|range| {
             let (injected_line_tokens, injected_block_tokens) =
-                comment::get_injected_tokens(
+                comment::injected_tokens_for_range(
                     loader,
                     syntax,
                     range.from() as u32,
@@ -5301,7 +5307,7 @@ fn toggle_line_comments(cx: &mut Context) {
         rope,
         selection.iter().flat_map(|range| {
             let (injected_line_tokens, injected_block_tokens) =
-                comment::get_injected_tokens(
+                comment::injected_tokens_for_range(
                     loader,
                     syntax,
                     range.from() as u32,
@@ -5355,7 +5361,7 @@ fn toggle_block_comments(cx: &mut Context) {
         rope,
         selection.iter().flat_map(|range| {
             let (injected_line_tokens, injected_block_tokens) =
-                comment::get_injected_tokens(
+                comment::injected_tokens_for_range(
                     loader,
                     syntax,
                     range.from() as u32,
diff --git a/index.html b/index.html
deleted file mode 100644
index 190a542b7..000000000
--- a/index.html
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
diff --git a/xtask/src/querycheck.rs b/xtask/src/querycheck.rs
deleted file mode 100644
index 0915288fc..000000000
--- a/xtask/src/querycheck.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-use crate::DynError;
-
-pub fn query_check() -> Result<(), DynError> {
-    use crate::helpers::lang_config;
-    use helix_core::{syntax::read_query, tree_sitter::Query};
-    use helix_loader::grammar::get_language;
-
-    let query_files = [
-        "highlights.scm",
-        "locals.scm",
-        "injections.scm",
-        "textobjects.scm",
-        "indents.scm",
-    ];
-
-    for language in lang_config().language {
-        let language_name = &language.language_name;
-        let grammar_name = language.grammar.as_ref().unwrap_or(language_name);
-        for query_file in query_files {
-            let language = get_language(grammar_name);
-            let query_text = read_query(language_name, query_file);
-            if let Ok(lang) = language {
-                if !query_text.is_empty() {
-                    if let Err(reason) = Query::new(&lang, &query_text) {
-                        return Err(format!(
-                            "Failed to parse {} queries for {}: {}",
-                            query_file, language_name, reason
-                        )
-                        .into());
-                    }
-                }
-            }
-        }
-    }
-
-    println!("Query check succeeded");
-
-    Ok(())
-}
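
Note: the five rules documented on `toggle_comments_impl` form a decision ladder. One way to read them, with the rope and selection analysis replaced by pre-computed booleans (the enum, function, and inputs below are illustrative assumptions, not the real signature):

/// Which action a toggle ends up taking for one selection range.
#[derive(Debug, PartialEq, Eq)]
enum CommentAction {
    ToggleLine,
    UncommentBlockLines,
    UncommentBlockSelection,
    CommentBlockLines,
}

/// Sketch of the dispatch rules above as an if-ladder.
fn dispatch(
    has_line_tokens: bool,
    has_block_tokens: bool,
    each_line_block_commented: bool,
    selection_block_commented: bool,
) -> CommentAction {
    if has_line_tokens && !has_block_tokens {
        CommentAction::ToggleLine // rule 1: only line tokens available
    } else if each_line_block_commented {
        CommentAction::UncommentBlockLines // rule 2
    } else if selection_block_commented {
        CommentAction::UncommentBlockSelection // rule 3
    } else if has_block_tokens {
        CommentAction::CommentBlockLines // rule 4
    } else {
        CommentAction::ToggleLine // rule 5: nothing block-related applies
    }
}

fn main() {
    assert_eq!(dispatch(true, false, false, false), CommentAction::ToggleLine);
    assert_eq!(dispatch(true, true, true, false), CommentAction::UncommentBlockLines);
    assert_eq!(dispatch(false, false, false, false), CommentAction::ToggleLine);
}
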