Java has a convenient split method:
String str = "The quick brown fox";
String[] results = str.split(" ");
Is there a simple way to do the same thing in C++?
Current answer
You can do this fairly directly with streams, iterators, and the copy algorithm.
#include <string>
#include <vector>
#include <iostream>
#include <istream>
#include <ostream>
#include <iterator>
#include <sstream>
#include <algorithm>

int main()
{
    std::string str = "The quick brown fox";

    // construct a stream from the string
    std::stringstream strstr(str);

    // use stream iterators to copy the stream to the vector as whitespace separated strings
    std::istream_iterator<std::string> it(strstr);
    std::istream_iterator<std::string> end;
    std::vector<std::string> results(it, end);

    // send the vector to stdout, one token per line
    std::ostream_iterator<std::string> oit(std::cout, "\n");
    std::copy(results.begin(), results.end(), oit);
}
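The stream-iterator approach above only splits on whitespace. As a rough sketch (not from the answer above; split_on is just an illustrative name), splitting on an explicit single-character delimiter can be done with std::getline on a std::stringstream:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// read tokens up to each occurrence of delim; empty fields between
// consecutive delimiters come back as empty strings
std::vector<std::string> split_on(const std::string& s, char delim)
{
    std::vector<std::string> tokens;
    std::stringstream ss(s);
    std::string item;
    while (std::getline(ss, item, delim))
        tokens.push_back(item);
    return tokens;
}

int main()
{
    for (const std::string& t : split_on("The quick brown fox", ' '))
        std::cout << t << "\n";
}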
Other answers
Here is a sample tokenizer class that does what you want:
//Header file
#include <string>

class Tokenizer
{
public:
    static const std::string DELIMITERS;

    Tokenizer(const std::string& str);
    Tokenizer(const std::string& str, const std::string& delimiters);

    bool NextToken();
    bool NextToken(const std::string& delimiters);
    const std::string GetToken() const;
    void Reset();

protected:
    size_t m_offset;
    const std::string m_string;
    std::string m_token;
    std::string m_delimiters;
};
//CPP file
const std::string Tokenizer::DELIMITERS(" \t\n\r");

Tokenizer::Tokenizer(const std::string& s) :
    m_offset(0),
    m_string(s),
    m_delimiters(DELIMITERS) {}

Tokenizer::Tokenizer(const std::string& s, const std::string& delimiters) :
    m_offset(0),
    m_string(s),
    m_delimiters(delimiters) {}

bool Tokenizer::NextToken()
{
    return NextToken(m_delimiters);
}

bool Tokenizer::NextToken(const std::string& delimiters)
{
    // skip any leading delimiters
    size_t i = m_string.find_first_not_of(delimiters, m_offset);
    if (std::string::npos == i)
    {
        m_offset = m_string.length();
        return false;
    }

    // find the end of the token
    size_t j = m_string.find_first_of(delimiters, i);
    if (std::string::npos == j)
    {
        m_token = m_string.substr(i);
        m_offset = m_string.length();
        return true;
    }

    m_token = m_string.substr(i, j - i);
    m_offset = j;
    return true;
}

// GetToken and Reset are declared in the header but were missing above
const std::string Tokenizer::GetToken() const
{
    return m_token;
}

void Tokenizer::Reset()
{
    m_offset = 0;
}
Example:
std::vector<std::string> v;
Tokenizer s("split this string", " ");
while (s.NextToken())
{
    v.push_back(s.GetToken());
}
Here is a simple loop that tokenizes using only the standard library files:
#include <iostream>
#include <cstring>

class word
{
public:
    char w[20];

    word()
    {
        for (int j = 0; j < 20; j++)
        {
            w[j] = '\0';
        }
    }
};

int main()
{
    int j = 0, k = 0;
    char input[100];
    word ww[100];

    std::cin.getline(input, sizeof(input));
    int n = std::strlen(input);

    for (int i = 0; i < n; i++)
    {
        if (input[i] != ' ')
        {
            if (j < 19) // leave room for the terminating '\0'
            {
                ww[k].w[j] = input[i]; // append the character to the current word
                j++;
            }
        }
        else
        {
            k++; // a space starts a new word
            j = 0;
        }
    }

    // print the words, one per line
    for (int i = 0; i <= k; i++)
    {
        std::cout << ww[i].w << std::endl;
    }
}
It strikes me as odd that, with so many speed-conscious nerds here on SO, nobody has presented a version that uses a compile-time-generated lookup table for the delimiters (an example implementation is further down). Using a lookup table and iterators should beat std::regex in efficiency; if you don't need to beat regex, just use it: it is standard as of C++11 and super flexible.
Some have already suggested regex, but for the newcomers here is a packaged example that should do exactly what the OP expects:
#include <iostream>
#include <iterator>
#include <regex>
#include <string>
#include <vector>

std::vector<std::string> split(std::string::const_iterator it, std::string::const_iterator end, std::regex e = std::regex{"\\w+"}){
    std::smatch m{};
    std::vector<std::string> ret{};
    while (std::regex_search (it,end,m,e)) {
        ret.emplace_back(m.str());
        std::advance(it, m.position() + m.length()); //next start position = match position + match length
    }
    return ret;
}

std::vector<std::string> split(const std::string &s, std::regex e = std::regex{"\\w+"}){ //comfort version calls flexible version
    return split(s.cbegin(), s.cend(), std::move(e));
}

int main ()
{
    std::string str {"Some people, excluding those present, have been compile time constants - since puberty."};
    auto v = split(str);
    for(const auto& s : v){
        std::cout << s << std::endl;
    }

    std::cout << "crazy version:" << std::endl;
    v = split(str, std::regex{"[^e]+"}); //using e as delim shows flexibility
    for(const auto& s : v){
        std::cout << s << std::endl;
    }
    return 0;
}
If we need to be faster and accept the constraint that all characters must be 8 bits, we can make a lookup table at compile time using metaprogramming:
// includes needed for the full example (table, getNextToken and main below)
#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>

template<bool...> struct BoolSequence{};        //just here to hold bools
template<char...> struct CharSequence{};        //just here to hold chars

template<typename T, char C> struct Contains;   //generic
template<char First, char... Cs, char Match>    //not first specialization
struct Contains<CharSequence<First, Cs...>, Match> :
    Contains<CharSequence<Cs...>, Match>{};     //strip first and recurse
template<char First, char... Cs>                //is first specialization
struct Contains<CharSequence<First, Cs...>, First> : std::true_type{};
template<char Match>                            //not found specialization
struct Contains<CharSequence<>, Match> : std::false_type{};

template<int I, typename T, typename U>
struct MakeSequence;                            //generic
template<int I, bool... Bs, typename U>
struct MakeSequence<I, BoolSequence<Bs...>, U> : //not last
    MakeSequence<I-1, BoolSequence<Contains<U, static_cast<char>(I-1)>::value, Bs...>, U>{}; //cast avoids narrowing for values above 127
template<bool... Bs, typename U>
struct MakeSequence<0, BoolSequence<Bs...>, U>{ //last
    using Type = BoolSequence<Bs...>;
};

template<typename T> struct BoolASCIITable;
template<bool... Bs> struct BoolASCIITable<BoolSequence<Bs...>>{
    /* could be made constexpr but not yet supported by MSVC */
    static bool isDelim(const char c){
        static const bool table[256] = {Bs...};
        return table[static_cast<unsigned char>(c)]; //index via unsigned char so negative chars stay in range
    }
};

using Delims = CharSequence<'.',',',' ',':','\n'>; //list your custom delimiters here
using Table  = BoolASCIITable<typename MakeSequence<256, BoolSequence<>, Delims>::Type>;
With that in place, creating a getNextToken function is easy:
template<typename T_It>
std::pair<T_It, T_It> getNextToken(T_It begin, T_It end){
    begin       = std::find_if(begin, end, [](char c){ return !Table::isDelim(c); }); //find first non delim or end
    auto second = std::find_if(begin, end, [](char c){ return  Table::isDelim(c); }); //find first delim or end
    return std::make_pair(begin, second);
}
Using it is just as easy:
int main() {
    std::string s{"Some people, excluding those present, have been compile time constants - since puberty."};
    auto it  = std::begin(s);
    auto end = std::end(s);
    while (it != std::end(s)){
        auto token = getNextToken(it, end);
        std::cout << std::string(token.first, token.second) << std::endl;
        it = token.second;
    }
    return 0;
}
There is a live example here: http://ideone.com/GKtkLQ
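The comment in BoolASCIITable above notes that the table could be made constexpr. As a rough sketch only (not part of the answer; DelimTable and makeDelimTable are made-up names), a C++14 constexpr function can build an equivalent 256-entry table without the template recursion:

#include <cstddef>

struct DelimTable { bool is_delim[256]; };

// build the delimiter table at compile time from a list of delimiter characters
constexpr DelimTable makeDelimTable(const char* delims, std::size_t n)
{
    DelimTable t{}; // all entries start out false
    for (std::size_t i = 0; i < n; ++i)
        t.is_delim[static_cast<unsigned char>(delims[i])] = true;
    return t;
}

constexpr char kDelims[] = {'.', ',', ' ', ':', '\n'};
constexpr DelimTable kTable = makeDelimTable(kDelims, sizeof(kDelims));

inline bool isDelim(char c)
{
    return kTable.is_delim[static_cast<unsigned char>(c)];
}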
Simple C++ code (standard C++98) that accepts multiple delimiters (specified in a std::string) and uses only vectors, strings and iterators.
#include <iostream>
#include <vector>
#include <string>
#include <stdexcept>

std::vector<std::string>
split(const std::string& str, const std::string& delim){
    std::vector<std::string> result;
    if (str.empty())
        throw std::runtime_error("Can not tokenize an empty string!");

    std::string::const_iterator begin, str_it;
    begin = str_it = str.begin();

    do {
        // find the position of the first delimiter in str (check for end before dereferencing)
        while (str_it != str.end() && delim.find(*str_it) == std::string::npos)
            str_it++;
        std::string token = std::string(begin, str_it); // grab the token
        if (!token.empty()) // empty token only when str starts with a delimiter
            result.push_back(token); // push the token into a vector<string>
        // ignore the additional consecutive delimiters
        while (str_it != str.end() && delim.find(*str_it) != std::string::npos)
            str_it++;
        begin = str_it; // process the remaining tokens
    } while (str_it != str.end());

    return result;
}

int main() {
    std::string test_string = ".this is.a.../.simple;;test;;;END";
    std::string delim = "; ./"; // string containing the delimiters
    std::vector<std::string> tokens = split(test_string, delim);
    for (std::vector<std::string>::const_iterator it = tokens.begin();
         it != tokens.end(); it++)
        std::cout << *it << std::endl;
}
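With the test string above this prints the tokens this, is, a, simple, test and END, one per line; leading, trailing and consecutive delimiters do not produce empty tokens, because empty tokens are skipped and runs of delimiters are consumed together.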
Below is my Swiss® Army knife string tokenizer for splitting strings on whitespace, handling single- and double-quote-wrapped strings, and stripping those quote characters from the results. I used RegexBuddy 4.x to generate most of the code snippet, but I added custom handling for stripping the quotes and a few other things.
#include <string>
#include <locale>
#include <regex>
#include <vector>

std::vector<std::wstring> tokenize_string(std::wstring string_to_tokenize) {
    std::vector<std::wstring> tokens;

    std::wregex re(LR"(("[^"]*"|'[^']*'|[^"' ]+))", std::regex_constants::collate);
    std::wsregex_iterator next( string_to_tokenize.begin(),
                                string_to_tokenize.end(),
                                re,
                                std::regex_constants::match_not_null );
    std::wsregex_iterator end;

    const wchar_t single_quote = L'\'';
    const wchar_t double_quote = L'\"';
    while ( next != end ) {
        std::wsmatch match = *next;
        const std::wstring token = match.str( 0 );
        next++;

        // strip the wrapping quotes from quoted tokens
        if (token.length() > 2 && (token.front() == double_quote || token.front() == single_quote))
            tokens.emplace_back( std::wstring(token.begin()+1, token.begin()+token.length()-1) );
        else
            tokens.emplace_back(token);
    }
    return tokens;
}
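A minimal usage sketch (the sample input line is made up, and this assumes it is compiled together with tokenize_string above):

#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::wstring line = L"plain \"double quoted\" 'single quoted' tokens";
    for (const std::wstring& token : tokenize_string(line))
        std::wcout << token << L"\n";
}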