Java has a convenient split method:

String str = "The quick brown fox";
String[] results = str.split(" ");

Is there an easy way to do this in C++?


Current answer

Here is a simple loop that tokenizes using only standard library files:

#include <iostream>
#include <cstring>

class word
{
public:
    char w[20];   // each token holds at most 19 characters
    word()
    {
        // zero the buffer (writing index 20 would be out of bounds)
        for (int j = 0; j < 20; j++)
            w[j] = '\0';
    }
};

int main()
{
    int j = 0, k = 0;
    char input[100];
    word ww[100];

    // gets() is unsafe (and removed in C++11); read a whole line instead
    std::cin.getline(input, sizeof(input));

    int n = std::strlen(input);

    for (int i = 0; i < n; i++)
    {
        if (input[i] != ' ')
        {
            ww[k].w[j] = input[i];   // append the character to the current token
            j++;
        }
        else
        {
            k++;                     // a space ends the token; start the next one
            j = 0;
        }
    }

    // print the collected tokens, one per line
    for (int t = 0; t <= k; t++)
        std::cout << ww[t].w << '\n';

    return 0;
}

Other answers

I previously made a lexer/tokenizer using only the standard library. Here is the code:

#include <iostream>
#include <string>
#include <vector>
#include <sstream>
#include <cstdlib>   // for system()

using namespace std;

string seps(string& s) {
    if (!s.size()) return "";
    stringstream ss;
    ss << s[0];
    for (string::size_type i = 1; i < s.size(); i++) {
        ss << '|' << s[i];
    }
    return ss.str();
}

void Tokenize(string& str, vector<string>& tokens, const string& delimiters = " ")
{
    str = seps(str);   // interleave '|' so the string splits into single characters below

    // Skip delimiters at beginning.
    string::size_type lastPos = str.find_first_not_of(delimiters, 0);
    // Find first "non-delimiter".
    string::size_type pos = str.find_first_of(delimiters, lastPos);

    while (string::npos != pos || string::npos != lastPos)
    {
        // Found a token, add it to the vector.
        tokens.push_back(str.substr(lastPos, pos - lastPos));
        // Skip delimiters.  Note the "not_of"
        lastPos = str.find_first_not_of(delimiters, pos);
        // Find next "non-delimiter"
        pos = str.find_first_of(delimiters, lastPos);
    }
}

int main(int argc, char *argv[])
{
    vector<string> t;
    string s = "Tokens for everyone!";

    Tokenize(s, t, "|");

    for (auto c : t)
        cout << c << endl;

    system("pause");

    return 0;
}

Here is a sample tokenizer class that does what you want:

//Header file
class Tokenizer 
{
    public:
        static const std::string DELIMITERS;
        Tokenizer(const std::string& str);
        Tokenizer(const std::string& str, const std::string& delimiters);
        bool NextToken();
        bool NextToken(const std::string& delimiters);
        const std::string GetToken() const;
        void Reset();
    protected:
        size_t m_offset;
        const std::string m_string;
        std::string m_token;
        std::string m_delimiters;
};

//CPP file
const std::string Tokenizer::DELIMITERS(" \t\n\r");

Tokenizer::Tokenizer(const std::string& s) :
    m_offset(0),
    m_string(s),
    m_delimiters(DELIMITERS) {}

Tokenizer::Tokenizer(const std::string& s, const std::string& delimiters) :
    m_offset(0),
    m_string(s),
    m_delimiters(delimiters) {}

bool Tokenizer::NextToken() 
{
    return NextToken(m_delimiters);
}

bool Tokenizer::NextToken(const std::string& delimiters) 
{
    size_t i = m_string.find_first_not_of(delimiters, m_offset);
    if (std::string::npos == i) 
    {
        m_offset = m_string.length();
        return false;
    }

    size_t j = m_string.find_first_of(delimiters, i);
    if (std::string::npos == j) 
    {
        m_token = m_string.substr(i);
        m_offset = m_string.length();
        return true;
    }

    m_token = m_string.substr(i, j - i);
    m_offset = j;
    return true;
}

const std::string Tokenizer::GetToken() const
{
    return m_token;
}

void Tokenizer::Reset()
{
    m_offset = 0;
}

Example:

std::vector <std::string> v;
Tokenizer s("split this string", " ");
while (s.NextToken())
{
    v.push_back(s.GetToken());
}

Here is my Swiss® Army knife string tokenizer for splitting strings on whitespace, accounting for single- and double-quote-wrapped strings, and stripping those characters from the results. I used RegexBuddy 4.x to generate most of the code snippet, but I added custom handling for stripping quotes and some other things.

#include <string>
#include <vector>
#include <locale>
#include <regex>

std::vector<std::wstring> tokenize_string(std::wstring string_to_tokenize) {
    std::vector<std::wstring> tokens;

    std::wregex re(LR"(("[^"]*"|'[^']*'|[^"' ]+))", std::regex_constants::collate);

    std::wsregex_iterator next( string_to_tokenize.begin(),
                                string_to_tokenize.end(),
                                re,
                                std::regex_constants::match_not_null );

    std::wsregex_iterator end;
    const wchar_t single_quote = L'\'';
    const wchar_t double_quote = L'\"';
    while ( next != end ) {
        std::wsmatch match = *next;
        const std::wstring token = match.str( 0 );
        next++;

        if (token.length() > 2 && (token.front() == double_quote || token.front() == single_quote))
            tokens.emplace_back( std::wstring(token.begin()+1, token.begin()+token.length()-1) );
        else
            tokens.emplace_back(token);
    }
    return tokens;
}
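For illustration, a minimal usage sketch of the function above (the sample input and the printing loop are my own additions, not part of the original answer):

#include <iostream>

int main()
{
    // Quoted phrases survive as single tokens and lose their surrounding quotes.
    std::wstring input = L"one \"two three\" 'four five' six";

    for (const std::wstring& token : tokenize_string(input))
        std::wcout << token << std::endl;

    // Expected output, one token per line:
    //   one
    //   two three
    //   four five
    //   six
    return 0;
}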

There are a lot of overly complicated suggestions here. Try this simple std::string solution:

using namespace std;

string someText = ...

string::size_type tokenOff = 0, sepOff = tokenOff;
while (sepOff != string::npos)
{
    sepOff = someText.find(' ', sepOff);
    string::size_type tokenLen = (sepOff == string::npos) ? sepOff : sepOff++ - tokenOff;
    string token = someText.substr(tokenOff, tokenLen);
    if (!token.empty())
        /* do something with token */;
    tokenOff = sepOff;
}
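If you prefer that loop packaged as a reusable helper, a minimal sketch might look like this (the split name and the hard-coded space delimiter are my own assumptions, not part of the answer above):

#include <string>
#include <vector>

std::vector<std::string> split(const std::string& someText)
{
    std::vector<std::string> tokens;
    std::string::size_type tokenOff = 0, sepOff = tokenOff;
    while (sepOff != std::string::npos)
    {
        sepOff = someText.find(' ', sepOff);
        std::string::size_type tokenLen =
            (sepOff == std::string::npos) ? sepOff : sepOff++ - tokenOff;
        std::string token = someText.substr(tokenOff, tokenLen);
        if (!token.empty())
            tokens.push_back(token);   // collect the token instead of "do something"
        tokenOff = sepOff;
    }
    return tokens;
}

For example, split("The quick brown fox") yields {"The", "quick", "brown", "fox"}.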

You can use streams, iterators, and the copy algorithm to do this fairly directly.

#include <string>
#include <vector>
#include <iostream>
#include <istream>
#include <ostream>
#include <iterator>
#include <sstream>
#include <algorithm>

int main()
{
  std::string str = "The quick brown fox";

  // construct a stream from the string
  std::stringstream strstr(str);

  // use stream iterators to copy the stream to the vector as whitespace separated strings
  std::istream_iterator<std::string> it(strstr);
  std::istream_iterator<std::string> end;
  std::vector<std::string> results(it, end);

  // send the vector to stdout, one token per line
  std::ostream_iterator<std::string> oit(std::cout, "\n");
  std::copy(results.begin(), results.end(), oit);
}